-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci-drivers-janz-cmodio8
-rw-r--r--Documentation/ABI/testing/sysfs-class-net19
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-janz-ican319
-rw-r--r--Documentation/devicetree/bindings/net/amd-xgbe-phy.txt48
-rw-r--r--Documentation/devicetree/bindings/net/amd-xgbe.txt40
-rw-r--r--Documentation/devicetree/bindings/net/ipq806x-dwmac.txt35
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt1
-rw-r--r--Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt20
-rw-r--r--Documentation/devicetree/bindings/net/phy.txt3
-rw-r--r--Documentation/devicetree/bindings/net/renesas,ravb.txt48
-rw-r--r--Documentation/devicetree/bindings/net/ti,dp83867.txt25
-rw-r--r--Documentation/networking/bonding.txt84
-rw-r--r--Documentation/networking/can.txt3
-rw-r--r--Documentation/networking/dctcp.txt1
-rw-r--r--Documentation/networking/ieee802154.txt32
-rw-r--r--Documentation/networking/ip-sysctl.txt25
-rw-r--r--Documentation/networking/pktgen.txt150
-rw-r--r--Documentation/networking/switchdev.txt426
-rw-r--r--Documentation/networking/tc-actions-env-rules.txt6
-rw-r--r--Documentation/s390/qeth.txt4
-rw-r--r--MAINTAINERS25
-rw-r--r--arch/arm/net/bpf_jit_32.c10
-rw-r--r--arch/s390/net/bpf_jit.h10
-rw-r--r--arch/s390/net/bpf_jit_comp.c106
-rw-r--r--arch/x86/net/bpf_jit_comp.c150
-rw-r--r--crypto/af_alg.c4
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/atm/idt77105.c6
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/bcma/driver_gpio.c20
-rw-r--r--drivers/block/drbd/drbd_receiver.c4
-rw-r--r--drivers/bluetooth/Kconfig15
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/btbcm.c6
-rw-r--r--drivers/bluetooth/btintel.c6
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c2
-rw-r--r--drivers/bluetooth/btrtl.c390
-rw-r--r--drivers/bluetooth/btrtl.h52
-rw-r--r--drivers/bluetooth/btusb.c434
-rw-r--r--drivers/bluetooth/btwilink.c2
-rw-r--r--drivers/bluetooth/hci_bcsp.c4
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c594
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c8
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c7
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c11
-rw-r--r--drivers/infiniband/hw/mlx4/main.c75
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx5/Kconfig4
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c11
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c299
-rw-r--r--drivers/infiniband/hw/mlx5/main.c646
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h23
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c3
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c47
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c89
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c11
-rw-r--r--drivers/isdn/capi/capidrv.c4
-rw-r--r--drivers/isdn/hisax/st5481_usb.c4
-rw-r--r--drivers/isdn/mISDN/socket.c12
-rw-r--r--drivers/mfd/janz-cmodio.c4
-rw-r--r--drivers/net/Kconfig14
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bonding/bond_3ad.c26
-rw-r--r--drivers/net/bonding/bond_main.c55
-rw-r--r--drivers/net/bonding/bond_netlink.c50
-rw-r--r--drivers/net/bonding/bond_options.c91
-rw-r--r--drivers/net/bonding/bond_procfs.c8
-rw-r--r--drivers/net/bonding/bond_sysfs.c46
-rw-r--r--drivers/net/can/flexcan.c53
-rw-r--r--drivers/net/can/janz-ican3.c125
-rw-r--r--drivers/net/can/spi/mcp251x.c9
-rw-r--r--drivers/net/dsa/Kconfig12
-rw-r--r--drivers/net/dsa/bcm_sf2.c14
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c186
-rw-r--r--drivers/net/dsa/mv88e6131.c185
-rw-r--r--drivers/net/dsa/mv88e6171.c234
-rw-r--r--drivers/net/dsa/mv88e6352.c188
-rw-r--r--drivers/net/dsa/mv88e6xxx.c640
-rw-r--r--drivers/net/dsa/mv88e6xxx.h99
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/amd/Kconfig4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h155
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c17
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c35
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c99
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c341
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c79
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c384
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c1332
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h236
-rw-r--r--drivers/net/ethernet/apm/xgene/Makefile2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c36
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h8
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c207
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h30
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c200
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h49
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c62
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h20
-rw-r--r--drivers/net/ethernet/broadcom/b44.h8
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c170
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h4
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c59
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h31
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c67
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c7
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/Makefile2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h14
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h11
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_cna.h16
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h8
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c61
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h23
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h84
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_cna.h30
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h176
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h186
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c101
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h70
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c665
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h19
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c101
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c61
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h62
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c53
-rw-r--r--drivers/net/ethernet/cadence/macb.h9
-rw-r--r--drivers/net/ethernet/cavium/Kconfig57
-rw-r--r--drivers/net/ethernet/cavium/Makefile5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/Makefile16
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.c796
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.h107
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h535
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.c198
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.h33
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h51
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c1216
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c3668
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h673
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_image.h57
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h424
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c723
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c1309
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h649
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c989
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h426
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h319
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h237
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c199
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h75
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h224
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.c189
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h227
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c766
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c178
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.h140
-rw-r--r--drivers/net/ethernet/cavium/thunder/Makefile11
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h422
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c932
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h213
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c600
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c1331
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c1545
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h381
-rw-r--r--drivers/net/ethernet/cavium/thunder/q_struct.h701
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c966
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h220
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h220
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c363
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c294
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c489
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c382
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2215
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h86
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h184
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_values.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h72
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c123
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c36
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c31
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c10
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c10
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c2
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig9
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h55
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c56
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h50
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c11
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h14
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c500
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c78
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c53
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c10
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c41
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h5
-rw-r--r--drivers/net/ethernet/intel/e100.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c19
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h8
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c26
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h11
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c205
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c39
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c146
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c160
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c38
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c63
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c51
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c158
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c91
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h272
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c896
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c9
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c400
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c124
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h523
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c679
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c860
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1906
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c249
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c356
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/flow_table.c422
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c343
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c444
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c360
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.h64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c345
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h171
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c4
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/neterion/s2io.c6
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c77
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c4
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c46
-rw-r--r--drivers/net/ethernet/renesas/Kconfig31
-rw-r--r--drivers/net/ethernet/renesas/Makefile4
-rw-r--r--drivers/net/ethernet/renesas/ravb.h832
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c1824
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c357
-rw-r--r--drivers/net/ethernet/rocker/rocker.c1579
-rw-r--r--drivers/net/ethernet/rocker/rocker.h28
-rw-r--r--drivers/net/ethernet/sfc/Kconfig9
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c1147
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c783
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h69
-rw-r--r--drivers/net/ethernet/sfc/efx.c316
-rw-r--r--drivers/net/ethernet/sfc/efx.h15
-rw-r--r--drivers/net/ethernet/sfc/enum.h2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c7
-rw-r--r--drivers/net/ethernet/sfc/falcon.c33
-rw-r--r--drivers/net/ethernet/sfc/farch.c64
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c228
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h16
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h434
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c13
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h34
-rw-r--r--drivers/net/ethernet/sfc/nic.h251
-rw-r--r--drivers/net/ethernet/sfc/ptp.c40
-rw-r--r--drivers/net/ethernet/sfc/siena.c27
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c156
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.h79
-rw-r--r--drivers/net/ethernet/sfc/sriov.c83
-rw-r--r--drivers/net/ethernet/sfc/sriov.h31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig90
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c365
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c99
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c129
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h9
-rw-r--r--drivers/net/ethernet/ti/cpsw.c9
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c45
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c3
-rw-r--r--drivers/net/ethernet/tile/tilepro.c3
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c24
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c3
-rw-r--r--drivers/net/ethernet/via/via-rhine.c249
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c16
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h108
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c288
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c30
-rw-r--r--drivers/net/fddi/skfp/srf.c2
-rw-r--r--drivers/net/geneve.c523
-rw-r--r--drivers/net/hyperv/hyperv_net.h10
-rw-r--r--drivers/net/hyperv/netvsc.c54
-rw-r--r--drivers/net/hyperv/netvsc_drv.c97
-rw-r--r--drivers/net/hyperv/rndis_filter.c16
-rw-r--r--drivers/net/ieee802154/Kconfig10
-rw-r--r--drivers/net/ieee802154/Makefile1
-rw-r--r--drivers/net/ieee802154/at86rf230.c376
-rw-r--r--drivers/net/ieee802154/at86rf230.h220
-rw-r--r--drivers/net/ieee802154/atusb.c699
-rw-r--r--drivers/net/ieee802154/atusb.h84
-rw-r--r--drivers/net/ieee802154/cc2520.c2
-rw-r--r--drivers/net/ieee802154/fakelb.c209
-rw-r--r--drivers/net/ieee802154/mrf24j40.c2
-rw-r--r--drivers/net/ipvlan/ipvlan.h5
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c138
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c25
-rw-r--r--drivers/net/irda/irda-usb.c4
-rw-r--r--drivers/net/macvtap.c34
-rw-r--r--drivers/net/phy/Kconfig13
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/amd-xgbe-phy.c1901
-rw-r--r--drivers/net/phy/bcm7xxx.c7
-rw-r--r--drivers/net/phy/davicom.c13
-rw-r--r--drivers/net/phy/dp83867.c239
-rw-r--r--drivers/net/phy/icplus.c5
-rw-r--r--drivers/net/phy/marvell.c10
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c8
-rw-r--r--drivers/net/phy/mdio-bitbang.c7
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/phy/micrel.c53
-rw-r--r--drivers/net/phy/phy.c34
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/ppp/pppox.c2
-rw-r--r--drivers/net/ppp/pptp.c4
-rw-r--r--drivers/net/team/team.c10
-rw-r--r--drivers/net/tun.c26
-rw-r--r--drivers/net/vxlan.c10
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wan/dscc4.c9
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/adm8211.c35
-rw-r--r--drivers/net/wireless/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/at76c50x-usb.h2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c9
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c165
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h56
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c127
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c50
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c98
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h132
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c208
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c34
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h53
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c2767
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h29
-rw-r--r--drivers/net/wireless/ath/ath10k/p2p.c156
-rw-r--r--drivers/net/wireless/ath/ath10k/p2p.h28
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c356
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h95
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h22
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c134
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h22
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h194
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c579
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h168
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c276
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h230
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c321
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.h40
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c12
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c155
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c144
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c740
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.h35
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/led.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c27
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c5
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c72
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.h4
-rw-r--r--drivers/net/wireless/ath/dfs_pri_detector.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c12
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c128
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c107
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c31
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.c375
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.h27
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c48
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h24
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h24
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c70
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h40
-rw-r--r--drivers/net/wireless/b43/main.c12
-rw-r--r--drivers/net/wireless/b43legacy/main.c13
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c36
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c206
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/commonring.c18
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/feature.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/feature.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/firmware.c217
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/firmware.h6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/flowring.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/flowring.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c20
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/of.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/pcie.c184
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c12
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h3
-rw-r--r--drivers/net/wireless/cw1200/main.c16
-rw-r--r--drivers/net/wireless/cw1200/sta.c10
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c10
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c16
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig12
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile1
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c71
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h107
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.c113
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h22
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c103
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c22
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h91
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h63
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c23
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c443
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h162
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c16
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c37
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c44
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c1418
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c40
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c35
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c17
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h51
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c415
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c135
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/libertas/cfg.c13
-rw-r--r--drivers/net/wireless/libertas/cfg.h3
-rw-r--r--drivers/net/wireless/libertas/cmd.h3
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c13
-rw-r--r--drivers/net/wireless/libertas_tf/main.c9
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c77
-rw-r--r--drivers/net/wireless/mediatek/Kconfig10
-rw-r--r--drivers/net/wireless/mediatek/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/Kconfig6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/Makefile9
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/core.c78
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/debugfs.c172
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c533
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.h127
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c414
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.h151
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c625
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/initvals.h164
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/initvals_phy.h291
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c569
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.h178
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c412
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mcu.c534
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mcu.h94
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mt7601u.h390
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c1251
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/regs.h636
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/trace.c21
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/trace.h400
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/tx.c319
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c360
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.h77
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/util.c42
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/util.h77
-rw-r--r--drivers/net/wireless/mwifiex/11h.c48
-rw-r--r--drivers/net/wireless/mwifiex/11n.c24
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c7
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c51
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig2
-rw-r--r--drivers/net/wireless/mwifiex/README6
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c462
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c50
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c365
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c165
-rw-r--r--drivers/net/wireless/mwifiex/ethtool.c97
-rw-r--r--drivers/net/wireless/mwifiex/fw.h9
-rw-r--r--drivers/net/wireless/mwifiex/init.c54
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h6
-rw-r--r--drivers/net/wireless/mwifiex/join.c222
-rw-r--r--drivers/net/wireless/mwifiex/main.c224
-rw-r--r--drivers/net/wireless/mwifiex/main.h59
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c589
-rw-r--r--drivers/net/wireless/mwifiex/scan.c377
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c458
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c159
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c197
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c132
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c147
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c13
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c18
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c86
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c30
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c22
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c44
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c36
-rw-r--r--drivers/net/wireless/mwifiex/usb.c141
-rw-r--r--drivers/net/wireless/mwifiex/util.c34
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c107
-rw-r--r--drivers/net/wireless/mwl8k.c11
-rw-r--r--drivers/net/wireless/p54/fwio.c3
-rw-r--r--drivers/net/wireless/p54/led.c2
-rw-r--r--drivers/net/wireless/p54/main.c18
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c35
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800soc.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c13
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c9
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c6
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig2
-rw-r--r--drivers/net/wireless/rtlwifi/base.c22
-rw-r--r--drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c7
-rw-r--r--drivers/net/wireless/rtlwifi/core.h3
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c42
-rw-r--r--drivers/net/wireless/rtlwifi/regd.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/hw.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/fw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ee/hw.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c13
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/fw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/hw.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8821ae/hw.c24
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c12
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c71
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c54
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c12
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/of/of_mdio.c3
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/qeth_core.h2
-rw-r--r--drivers/s390/net/qeth_core_main.c3
-rw-r--r--drivers/s390/net/qeth_core_mpc.c3
-rw-r--r--drivers/s390/net/qeth_core_mpc.h3
-rw-r--r--drivers/s390/net/qeth_l2_main.c124
-rw-r--r--drivers/s390/net/qeth_l2_sys.c74
-rw-r--r--drivers/s390/net/qeth_l3_main.c9
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c1
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h1
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c2
-rw-r--r--drivers/staging/vt6655/device_main.c19
-rw-r--r--drivers/staging/vt6656/main_usb.c19
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c1
-rw-r--r--drivers/target/target_core_file.c1
-rw-r--r--drivers/target/target_core_pr.c1
-rw-r--r--drivers/target/target_core_transport.c1
-rw-r--r--drivers/target/target_core_user.c1
-rw-r--r--drivers/vhost/scsi.c1
-rw-r--r--fs/afs/rxrpc.c2
-rw-r--r--fs/dlm/lowcomms.c16
-rw-r--r--fs/splice.c1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq806x.h2
-rw-r--r--include/dt-bindings/net/ti-dp83867.h45
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq806x.h43
-rw-r--r--include/linux/bcma/bcma.h9
-rw-r--r--include/linux/bpf.h32
-rw-r--r--include/linux/brcmphy.h7
-rw-r--r--include/linux/etherdevice.h42
-rw-r--r--include/linux/filter.h30
-rw-r--r--include/linux/gfp.h5
-rw-r--r--include/linux/if_pppox.h2
-rw-r--r--include/linux/if_vlan.h28
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/mdio-gpio.h3
-rw-r--r--include/linux/mlx4/device.h11
-rw-r--r--include/linux/mlx5/cq.h3
-rw-r--r--include/linux/mlx5/device.h215
-rw-r--r--include/linux/mlx5/driver.h171
-rw-r--r--include/linux/mlx5/flow_table.h54
-rw-r--r--include/linux/mlx5/mlx5_ifc.h6584
-rw-r--r--include/linux/mlx5/qp.h25
-rw-r--r--include/linux/mlx5/vport.h55
-rw-r--r--include/linux/mm_types.h18
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netdev_features.h5
-rw-r--r--include/linux/netdevice.h27
-rw-r--r--include/linux/netfilter.h39
-rw-r--r--include/linux/netfilter/ipset/ip_set.h32
-rw-r--r--include/linux/netfilter/x_tables.h2
-rw-r--r--include/linux/netfilter_ingress.h41
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/phy.h14
-rw-r--r--include/linux/rtnetlink.h10
-rw-r--r--include/linux/skbuff.h70
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/tcp.h15
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/af_unix.h1
-rw-r--r--include/net/af_vsock.h2
-rw-r--r--include/net/bond_options.h3
-rw-r--r--include/net/bonding.h3
-rw-r--r--include/net/cfg80211.h8
-rw-r--r--include/net/cfg802154.h70
-rw-r--r--include/net/checksum.h4
-rw-r--r--include/net/codel.h12
-rw-r--r--include/net/dst.h18
-rw-r--r--include/net/flow_dissector.h220
-rw-r--r--include/net/flow_keys.h45
-rw-r--r--include/net/geneve.h5
-rw-r--r--include/net/ieee802154_netdev.h16
-rw-r--r--include/net/inet_common.h2
-rw-r--r--include/net/inet_frag.h2
-rw-r--r--include/net/inet_hashtables.h49
-rw-r--r--include/net/inet_sock.h1
-rw-r--r--include/net/ip.h43
-rw-r--r--include/net/ip6_fib.h45
-rw-r--r--include/net/ip6_route.h21
-rw-r--r--include/net/ipv6.h43
-rw-r--r--include/net/llc_conn.h2
-rw-r--r--include/net/mac80211.h265
-rw-r--r--include/net/mac802154.h38
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/net/netfilter/nf_tables.h8
-rw-r--r--include/net/netns/ipv4.h3
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/nl802154.h79
-rw-r--r--include/net/request_sock.h4
-rw-r--r--include/net/sch_generic.h23
-rw-r--r--include/net/sock.h30
-rw-r--r--include/net/switchdev.h257
-rw-r--r--include/net/tcp.h95
-rw-r--r--include/uapi/linux/bpf.h24
-rw-r--r--include/uapi/linux/can.h6
-rw-r--r--include/uapi/linux/can/gw.h5
-rw-r--r--include/uapi/linux/ethtool.h37
-rw-r--r--include/uapi/linux/if_link.h14
-rw-r--r--include/uapi/linux/if_packet.h7
-rw-r--r--include/uapi/linux/in.h3
-rw-r--r--include/uapi/linux/ipv6_route.h1
-rw-r--r--include/uapi/linux/netfilter.h6
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h2
-rw-r--r--include/uapi/linux/netlink.h1
-rw-r--r--include/uapi/linux/nl80211.h28
-rw-r--r--include/uapi/linux/openvswitch.h4
-rw-r--r--include/uapi/linux/pkt_cls.h57
-rw-r--r--include/uapi/linux/pkt_sched.h7
-rw-r--r--include/uapi/linux/rds.h10
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--include/uapi/linux/tcp.h6
-rw-r--r--kernel/bpf/arraymap.c113
-rw-r--r--kernel/bpf/core.c98
-rw-r--r--kernel/bpf/helpers.c47
-rw-r--r--kernel/bpf/syscall.c42
-rw-r--r--kernel/bpf/verifier.c54
-rw-r--r--kernel/seccomp.c70
-rw-r--r--kernel/trace/bpf_trace.c14
-rw-r--r--lib/rhashtable.c8
-rw-r--r--lib/test_bpf.c2664
-rw-r--r--lib/test_rhashtable.c215
-rw-r--r--mm/page_alloc.c98
-rw-r--r--net/8021q/vlan.c96
-rw-r--r--net/Kconfig3
-rw-r--r--net/appletalk/ddp.c2
-rw-r--r--net/atm/common.c4
-rw-r--r--net/atm/common.h2
-rw-r--r--net/atm/pvc.c2
-rw-r--r--net/atm/svc.c2
-rw-r--r--net/ax25/af_ax25.c4
-rw-r--r--net/batman-adv/Makefile6
-rw-r--r--net/batman-adv/bat_algo.h2
-rw-r--r--net/batman-adv/bat_iv_ogm.c210
-rw-r--r--net/batman-adv/bitarray.c6
-rw-r--r--net/batman-adv/bitarray.h8
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c56
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h12
-rw-r--r--net/batman-adv/debugfs.c47
-rw-r--r--net/batman-adv/debugfs.h43
-rw-r--r--net/batman-adv/distributed-arp-table.c47
-rw-r--r--net/batman-adv/distributed-arp-table.h15
-rw-r--r--net/batman-adv/fragmentation.c44
-rw-r--r--net/batman-adv/fragmentation.h11
-rw-r--r--net/batman-adv/gateway_client.c41
-rw-r--r--net/batman-adv/gateway_client.h10
-rw-r--r--net/batman-adv/gateway_common.c13
-rw-r--r--net/batman-adv/gateway_common.h9
-rw-r--r--net/batman-adv/hard-interface.c40
-rw-r--r--net/batman-adv/hard-interface.h13
-rw-r--r--net/batman-adv/hash.c8
-rw-r--r--net/batman-adv/hash.h33
-rw-r--r--net/batman-adv/icmp_socket.c35
-rw-r--r--net/batman-adv/icmp_socket.h9
-rw-r--r--net/batman-adv/main.c103
-rw-r--r--net/batman-adv/main.h40
-rw-r--r--net/batman-adv/multicast.c31
-rw-r--r--net/batman-adv/multicast.h8
-rw-r--r--net/batman-adv/network-coding.c53
-rw-r--r--net/batman-adv/network-coding.h15
-rw-r--r--net/batman-adv/originator.c36
-rw-r--r--net/batman-adv/originator.h28
-rw-r--r--net/batman-adv/packet.h5
-rw-r--r--net/batman-adv/routing.c38
-rw-r--r--net/batman-adv/routing.h12
-rw-r--r--net/batman-adv/send.c40
-rw-r--r--net/batman-adv/send.h15
-rw-r--r--net/batman-adv/soft-interface.c72
-rw-r--r--net/batman-adv/soft-interface.h13
-rw-r--r--net/batman-adv/sysfs.c62
-rw-r--r--net/batman-adv/sysfs.h12
-rw-r--r--net/batman-adv/translation-table.c91
-rw-r--r--net/batman-adv/translation-table.h11
-rw-r--r--net/batman-adv/types.h33
-rw-r--r--net/bluetooth/6lowpan.c2
-rw-r--r--net/bluetooth/bnep/sock.c2
-rw-r--r--net/bluetooth/cmtp/sock.c2
-rw-r--r--net/bluetooth/hci_core.c5
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/hidp/sock.c2
-rw-r--r--net/bluetooth/l2cap_sock.c10
-rw-r--r--net/bluetooth/mgmt.c6
-rw-r--r--net/bluetooth/rfcomm/core.c2
-rw-r--r--net/bluetooth/rfcomm/sock.c8
-rw-r--r--net/bluetooth/sco.c8
-rw-r--r--net/bluetooth/smp.c20
-rw-r--r--net/bridge/br.c22
-rw-r--r--net/bridge/br_fdb.c14
-rw-r--r--net/bridge/br_multicast.c262
-rw-r--r--net/bridge/br_netfilter.c40
-rw-r--r--net/bridge/br_netlink.c24
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/bridge/br_stp.c6
-rw-r--r--net/bridge/netfilter/ebt_stp.c6
-rw-r--r--net/bridge/netfilter/ebtables.c2
-rw-r--r--net/caif/caif_socket.c2
-rw-r--r--net/can/af_can.c2
-rw-r--r--net/can/gw.c68
-rw-r--r--net/ceph/messenger.c4
-rw-r--r--net/core/dev.c220
-rw-r--r--net/core/ethtool.c13
-rw-r--r--net/core/filter.c259
-rw-r--r--net/core/flow_dissector.c656
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/net-sysfs.c10
-rw-r--r--net/core/net_namespace.c133
-rw-r--r--net/core/netevent.c5
-rw-r--r--net/core/pktgen.c110
-rw-r--r--net/core/rtnetlink.c12
-rw-r--r--net/core/secure_seq.c2
-rw-r--r--net/core/skbuff.c387
-rw-r--r--net/core/sock.c44
-rw-r--r--net/core/stream.c6
-rw-r--r--net/core/utils.c12
-rw-r--r--net/decnet/af_decnet.c8
-rw-r--r--net/dsa/slave.c54
-rw-r--r--net/ethernet/eth.c15
-rw-r--r--net/ieee802154/6lowpan/core.c28
-rw-r--r--net/ieee802154/6lowpan/tx.c2
-rw-r--r--net/ieee802154/core.c2
-rw-r--r--net/ieee802154/nl-mac.c39
-rw-r--r--net/ieee802154/nl-phy.c10
-rw-r--r--net/ieee802154/nl802154.c276
-rw-r--r--net/ieee802154/rdev-ops.h23
-rw-r--r--net/ieee802154/socket.c22
-rw-r--r--net/ieee802154/trace.h32
-rw-r--r--net/ipv4/Kconfig24
-rw-r--r--net/ipv4/Makefile3
-rw-r--r--net/ipv4/af_inet.c11
-rw-r--r--net/ipv4/fib_semantics.c4
-rw-r--r--net/ipv4/fib_trie.c62
-rw-r--r--net/ipv4/geneve_core.c (renamed from net/ipv4/geneve.c)10
-rw-r--r--net/ipv4/igmp.c162
-rw-r--r--net/ipv4/inet_connection_sock.c19
-rw-r--r--net/ipv4/inet_hashtables.c57
-rw-r--r--net/ipv4/inet_timewait_sock.c2
-rw-r--r--net/ipv4/ip_forward.c18
-rw-r--r--net/ipv4/ip_fragment.c46
-rw-r--r--net/ipv4/ip_output.c84
-rw-r--r--net/ipv4/ip_sockglue.c7
-rw-r--r--net/ipv4/ip_tunnel_core.c20
-rw-r--r--net/ipv4/ipip.c3
-rw-r--r--net/ipv4/netfilter/ip_tables.c4
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c5
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c4
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/route.c23
-rw-r--r--net/ipv4/syncookies.c10
-rw-r--r--net/ipv4/sysctl_net_ipv4.c25
-rw-r--r--net/ipv4/tcp.c89
-rw-r--r--net/ipv4/tcp_cdg.c433
-rw-r--r--net/ipv4/tcp_dctcp.c26
-rw-r--r--net/ipv4/tcp_input.c139
-rw-r--r--net/ipv4/tcp_ipv4.c15
-rw-r--r--net/ipv4/tcp_minisocks.c4
-rw-r--r--net/ipv4/tcp_offload.c4
-rw-r--r--net/ipv4/tcp_output.c106
-rw-r--r--net/ipv4/tcp_timer.c4
-rw-r--r--net/ipv4/udp_tunnel.c8
-rw-r--r--net/ipv6/Makefile1
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/af_inet6.c6
-rw-r--r--net/ipv6/icmp.c6
-rw-r--r--net/ipv6/inet6_hashtables.c8
-rw-r--r--net/ipv6/ip6_fib.c25
-rw-r--r--net/ipv6/ip6_flowlabel.c4
-rw-r--r--net/ipv6/ip6_output.c60
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ip6_udp_tunnel.c6
-rw-r--r--net/ipv6/mcast_snoop.c213
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c4
-rw-r--r--net/ipv6/output_core.c13
-rw-r--r--net/ipv6/raw.c11
-rw-r--r--net/ipv6/route.c561
-rw-r--r--net/ipv6/syncookies.c19
-rw-r--r--net/ipv6/sysctl_net_ipv6.c8
-rw-r--r--net/ipv6/tcp_ipv6.c16
-rw-r--r--net/ipv6/xfrm6_policy.c20
-rw-r--r--net/ipx/af_ipx.c2
-rw-r--r--net/irda/af_irda.c2
-rw-r--r--net/irda/timer.c4
-rw-r--r--net/iucv/af_iucv.c10
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_core.c15
-rw-r--r--net/l2tp/l2tp_ppp.c4
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/llc/llc_conn.c6
-rw-r--r--net/mac80211/Kconfig16
-rw-r--r--net/mac80211/agg-tx.c4
-rw-r--r--net/mac80211/cfg.c214
-rw-r--r--net/mac80211/chan.c10
-rw-r--r--net/mac80211/debugfs.c177
-rw-r--r--net/mac80211/debugfs_key.c17
-rw-r--r--net/mac80211/debugfs_sta.c85
-rw-r--r--net/mac80211/driver-ops.h13
-rw-r--r--net/mac80211/ethtool.c3
-rw-r--r--net/mac80211/ibss.c6
-rw-r--r--net/mac80211/ieee80211_i.h36
-rw-r--r--net/mac80211/iface.c74
-rw-r--r--net/mac80211/key.c96
-rw-r--r--net/mac80211/key.h7
-rw-r--r--net/mac80211/led.c268
-rw-r--r--net/mac80211/led.h44
-rw-r--r--net/mac80211/main.c34
-rw-r--r--net/mac80211/mesh.c1
-rw-r--r--net/mac80211/mesh_hwmp.c35
-rw-r--r--net/mac80211/mesh_plink.c44
-rw-r--r--net/mac80211/mlme.c247
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/pm.c4
-rw-r--r--net/mac80211/rate.c14
-rw-r--r--net/mac80211/rate.h14
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c2
-rw-r--r--net/mac80211/rx.c227
-rw-r--r--net/mac80211/scan.c18
-rw-r--r--net/mac80211/sta_info.c24
-rw-r--r--net/mac80211/sta_info.h44
-rw-r--r--net/mac80211/status.c163
-rw-r--r--net/mac80211/tdls.c52
-rw-r--r--net/mac80211/trace.h42
-rw-r--r--net/mac80211/tx.c549
-rw-r--r--net/mac80211/util.c6
-rw-r--r--net/mac80211/wpa.c10
-rw-r--r--net/mac802154/Kconfig1
-rw-r--r--net/mac802154/cfg.c101
-rw-r--r--net/mac802154/driver-ops.h8
-rw-r--r--net/mac802154/ieee802154_i.h7
-rw-r--r--net/mac802154/iface.c30
-rw-r--r--net/mac802154/mac_cmd.c42
-rw-r--r--net/mac802154/main.c32
-rw-r--r--net/mac802154/mib.c63
-rw-r--r--net/mac802154/rx.c5
-rw-r--r--net/mac802154/util.c5
-rw-r--r--net/mpls/mpls_gso.c2
-rw-r--r--net/netfilter/Kconfig13
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/core.c37
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c17
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c13
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c3
-rw-r--r--net/netfilter/ipset/ip_set_core.c49
-rw-r--r--net/netfilter/ipset/ip_set_getport.c6
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h22
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c33
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmark.c43
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c49
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c40
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c40
-rw-r--r--net/netfilter/ipset/ip_set_hash_mac.c11
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c28
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c29
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c30
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c38
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c52
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c30
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c19
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c4
-rw-r--r--net/netfilter/nf_tables_api.c46
-rw-r--r--net/netfilter/nf_tables_netdev.c183
-rw-r--r--net/netfilter/nfnetlink_queue_core.c2
-rw-r--r--net/netfilter/nft_compat.c2
-rw-r--r--net/netfilter/x_tables.c18
-rw-r--r--net/netfilter/xt_TCPMSS.c6
-rw-r--r--net/netfilter/xt_TEE.c1
-rw-r--r--net/netfilter/xt_addrtype.c2
-rw-r--r--net/netfilter/xt_mark.c1
-rw-r--r--net/netfilter/xt_set.c3
-rw-r--r--net/netlink/af_netlink.c148
-rw-r--r--net/netrom/af_netrom.c4
-rw-r--r--net/nfc/af_nfc.c2
-rw-r--r--net/nfc/llcp.h2
-rw-r--r--net/nfc/llcp_core.c2
-rw-r--r--net/nfc/llcp_sock.c8
-rw-r--r--net/nfc/nfc.h2
-rw-r--r--net/nfc/rawsock.c4
-rw-r--r--net/openvswitch/Kconfig2
-rw-r--r--net/openvswitch/actions.c23
-rw-r--r--net/openvswitch/datapath.c20
-rw-r--r--net/openvswitch/datapath.h2
-rw-r--r--net/openvswitch/flow.c4
-rw-r--r--net/openvswitch/flow_netlink.c2
-rw-r--r--net/openvswitch/vport-geneve.c5
-rw-r--r--net/packet/af_packet.c173
-rw-r--r--net/packet/internal.h12
-rw-r--r--net/phonet/af_phonet.c2
-rw-r--r--net/phonet/pep.c2
-rw-r--r--net/rds/af_rds.c43
-rw-r--r--net/rds/bind.c4
-rw-r--r--net/rds/rds.h6
-rw-r--r--net/rds/transport.c21
-rw-r--r--net/rfkill/core.c12
-rw-r--r--net/rfkill/rfkill-gpio.c24
-rw-r--r--net/rose/af_rose.c4
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/rxrpc/ar-local.c4
-rw-r--r--net/sched/Kconfig11
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/act_api.c5
-rw-r--r--net/sched/act_bpf.c9
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_pedit.c5
-rw-r--r--net/sched/cls_bpf.c16
-rw-r--r--net/sched/cls_flow.c28
-rw-r--r--net/sched/cls_flower.c691
-rw-r--r--net/sched/sch_api.c12
-rw-r--r--net/sched/sch_choke.c20
-rw-r--r--net/sched/sch_codel.c15
-rw-r--r--net/sched/sch_fq_codel.c26
-rw-r--r--net/sched/sch_gred.c28
-rw-r--r--net/sched/sch_hhf.c19
-rw-r--r--net/sched/sch_ingress.c59
-rw-r--r--net/sched/sch_netem.c4
-rw-r--r--net/sched/sch_sfb.c24
-rw-r--r--net/sched/sch_sfq.c27
-rw-r--r--net/sctp/ipv6.c7
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/socket.c7
-rw-r--r--net/switchdev/switchdev.c835
-rw-r--r--net/tipc/addr.c7
-rw-r--r--net/tipc/addr.h8
-rw-r--r--net/tipc/bcast.c41
-rw-r--r--net/tipc/bcast.h1
-rw-r--r--net/tipc/bearer.c20
-rw-r--r--net/tipc/bearer.h2
-rw-r--r--net/tipc/core.c4
-rw-r--r--net/tipc/core.h37
-rw-r--r--net/tipc/link.c311
-rw-r--r--net/tipc/link.h59
-rw-r--r--net/tipc/msg.c51
-rw-r--r--net/tipc/msg.h37
-rw-r--r--net/tipc/name_table.c34
-rw-r--r--net/tipc/net.c1
-rw-r--r--net/tipc/netlink_compat.c137
-rw-r--r--net/tipc/node.c3
-rw-r--r--net/tipc/node.h2
-rw-r--r--net/tipc/server.c6
-rw-r--r--net/tipc/socket.c10
-rw-r--r--net/tipc/subscr.c242
-rw-r--r--net/tipc/subscr.h18
-rw-r--r--net/unix/af_unix.c267
-rw-r--r--net/vmw_vsock/af_vsock.c7
-rw-r--r--net/vmw_vsock/vmci_transport.c2
-rw-r--r--net/wireless/chan.c65
-rw-r--r--net/wireless/core.h1
-rw-r--r--net/wireless/nl80211.c7
-rw-r--r--net/wireless/reg.c4
-rw-r--r--net/wireless/sme.c4
-rw-r--r--net/wireless/sysfs.c14
-rw-r--r--net/wireless/util.c5
-rw-r--r--net/x25/af_x25.c8
-rw-r--r--net/xfrm/xfrm_input.c12
-rw-r--r--net/xfrm/xfrm_output.c12
-rw-r--r--net/xfrm/xfrm_policy.c42
-rw-r--r--net/xfrm/xfrm_state.c4
-rw-r--r--samples/bpf/Makefile10
-rw-r--r--samples/bpf/bpf_helpers.h4
-rw-r--r--samples/bpf/bpf_load.c57
-rw-r--r--samples/bpf/sockex3_kern.c290
-rw-r--r--samples/bpf/sockex3_user.c66
-rw-r--r--samples/bpf/tcbpf1_kern.c8
-rw-r--r--samples/bpf/test_verifier.c84
-rw-r--r--samples/bpf/tracex5_kern.c75
-rw-r--r--samples/bpf/tracex5_user.c46
-rw-r--r--samples/pktgen/README.rst43
-rw-r--r--samples/pktgen/functions.sh121
-rw-r--r--samples/pktgen/parameters.sh97
-rwxr-xr-xsamples/pktgen/pktgen.conf-1-159
-rwxr-xr-xsamples/pktgen/pktgen.conf-2-166
-rwxr-xr-xsamples/pktgen/pktgen.conf-2-273
-rwxr-xr-xsamples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh86
-rwxr-xr-xsamples/pktgen/pktgen_sample01_simple.sh71
-rwxr-xr-xsamples/pktgen/pktgen_sample02_multiqueue.sh75
-rwxr-xr-xsamples/pktgen/pktgen_sample03_burst_single_flow.sh82
-rw-r--r--tools/testing/selftests/net/psock_fanout.c2
 1148 files changed, 97218 insertions(+), 22913 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-drivers-janz-cmodio b/Documentation/ABI/testing/sysfs-bus-pci-drivers-janz-cmodio
new file mode 100644
index 000000000000..4d08f28dc871
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-pci-drivers-janz-cmodio
@@ -0,0 +1,8 @@
+What: /sys/bus/pci/drivers/janz-cmodio/.../modulbus_number
+Date: May 2010
+KernelVersion: 2.6.35
+Contact: Ira W. Snyder <ira.snyder@gmail.com>
+Description:
+		Value representing the HEX switch S2 of the Janz carrier board CMOD-IO or CAN-PCI2
+
+ Read-only: value of the configuration switch (0..15)
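
For reference, reading this attribute from userspace is an ordinary sysfs file
read. A minimal Python sketch, assuming a hypothetical CMOD-IO board enumerated
at PCI address 0000:01:00.0 (the real path component depends on where the board
sits on the bus):

    from pathlib import Path

    # Hypothetical PCI address; substitute the real device directory found
    # under /sys/bus/pci/drivers/janz-cmodio/.
    dev = "0000:01:00.0"
    attr = Path(f"/sys/bus/pci/drivers/janz-cmodio/{dev}/modulbus_number")

    # Read-only attribute holding the HEX switch S2 setting (0..15).
    print("modulbus_number:", attr.read_text().strip())
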
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index 5ecfd72ba684..668604fc8e06 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -39,6 +39,25 @@ Description:
Format is a string, e.g: 00:11:22:33:44:55 for an Ethernet MAC
address.
+What: /sys/class/net/<bridge iface>/bridge/group_fwd_mask
+Date: January 2012
+KernelVersion: 3.2
+Contact: netdev@vger.kernel.org
+Description:
+		Bitmask to allow forwarding of link-local frames with address
+		01-80-C2-00-00-0X on a bridge device. Only values that set bits
+		not matching BR_GROUPFWD_RESTRICTED in net/bridge/br_private.h
+		are allowed.
+		The default value 0 does not forward any link-local frames.
+
+ Restricted bits:
+ 0: 01-80-C2-00-00-00 Bridge Group Address used for STP
+ 1: 01-80-C2-00-00-01 (MAC Control) 802.3 used for MAC PAUSE
+ 2: 01-80-C2-00-00-02 (Link Aggregation) 802.3ad
+
+		Any values not setting these bits can be used. Take special
+		care when forwarding control frames, e.g. 802.1X-PAE or LLDP.
+
What: /sys/class/net/<iface>/broadcast
Date: April 2005
KernelVersion: 2.6.12
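
The mask bits line up with the last octet of the 01-80-C2-00-00-0X range: bit X
forwards the address ending in X, so LLDP (01-80-C2-00-00-0E) corresponds to
bit 14. A small sketch of that mapping, with br0 as a placeholder bridge name:

    # Restricted bits per the description above: STP (0), MAC PAUSE (1),
    # 802.3ad (2) -- BR_GROUPFWD_RESTRICTED.
    RESTRICTED = 0b0111

    def group_fwd_mask_bit(last_octet: int) -> int:
        """Mask bit that forwards link-local address 01-80-C2-00-00-0X."""
        if not 0 <= last_octet <= 0x0F:
            raise ValueError("only 01-80-C2-00-00-00..0F map to mask bits")
        bit = 1 << last_octet
        if bit & RESTRICTED:
            raise ValueError("bits 0-2 are restricted and cannot be set")
        return bit

    mask = group_fwd_mask_bit(0x0E)   # LLDP -> bit 14 -> 16384 (0x4000)
    print(mask)
    # Then, e.g.: echo 16384 > /sys/class/net/br0/bridge/group_fwd_mask
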
diff --git a/Documentation/ABI/testing/sysfs-class-net-janz-ican3 b/Documentation/ABI/testing/sysfs-class-net-janz-ican3
new file mode 100644
index 000000000000..fdbc03a2b8f8
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-net-janz-ican3
@@ -0,0 +1,19 @@
+What: /sys/class/net/<iface>/termination
+Date: May 2010
+KernelVersion: 2.6.35
+Contact: Ira W. Snyder <ira.snyder@gmail.com>
+Description:
+		Value representing the CAN bus termination
+
+		Default: 1 (termination active)
+		Reading: get the current termination state
+		Writing: set the termination state (0=no termination, 1=termination active)
+
+What: /sys/class/net/<iface>/fwinfo
+Date: May 2015
+KernelVersion: 3.19
+Contact: Andreas Gröger <andreas24groeger@gmail.com>
+Description:
+		Firmware stamp of the ICAN3 module
+		Read-only: 32-byte identification string of the ICAN3 module
+		(known values: "JANZ-ICAN3 ICANOS 1.xx", "JANZ-ICAN3 CAL/CANopen 1.xx")
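
Both attributes are plain sysfs files, so toggling termination or dumping the
firmware stamp needs no special tooling. A minimal sketch, with can0 standing
in for whatever name the ICAN3 interface gets on a given system:

    from pathlib import Path

    iface = "can0"   # placeholder; use the actual ican3 netdev name
    base = Path(f"/sys/class/net/{iface}")

    # termination: readable and writable, "0" = off, "1" = active.
    print("termination active:",
          (base / "termination").read_text().strip() == "1")
    (base / "termination").write_text("0")   # switch termination off

    # fwinfo: read-only firmware identification string, e.g.
    # "JANZ-ICAN3 ICANOS 1.xx".
    print((base / "fwinfo").read_text().strip())
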
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
deleted file mode 100644
index 8db32384a486..000000000000
--- a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-* AMD 10GbE PHY driver (amd-xgbe-phy)
-
-Required properties:
-- compatible: Should be "amd,xgbe-phy-seattle-v1a" and
- "ethernet-phy-ieee802.3-c45"
-- reg: Address and length of the register sets for the device
- - SerDes Rx/Tx registers
- - SerDes integration registers (1/2)
- - SerDes integration registers (2/2)
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
-- interrupts: Should contain the amd-xgbe-phy interrupt.
-
-Optional properties:
-- amd,speed-set: Speed capabilities of the device
- 0 - 1GbE and 10GbE (default)
- 1 - 2.5GbE and 10GbE
-
-The following optional properties are represented by an array with each
-value corresponding to a particular speed. The first array value represents
-the setting for the 1GbE speed, the second value for the 2.5GbE speed and
-the third value for the 10GbE speed. All three values are required if the
-property is used.
-- amd,serdes-blwc: Baseline wandering correction enablement
- 0 - Off
- 1 - On
-- amd,serdes-cdr-rate: CDR rate speed selection
-- amd,serdes-pq-skew: PQ (data sampling) skew
-- amd,serdes-tx-amp: TX amplitude boost
-- amd,serdes-dfe-tap-config: DFE taps available to run
-- amd,serdes-dfe-tap-enable: DFE taps to enable
-
-Example:
- xgbe_phy@e1240800 {
- compatible = "amd,xgbe-phy-seattle-v1a", "ethernet-phy-ieee802.3-c45";
- reg = <0 0xe1240800 0 0x00400>,
- <0 0xe1250000 0 0x00060>,
- <0 0xe1250080 0 0x00004>;
- interrupt-parent = <&gic>;
- interrupts = <0 323 4>;
- amd,speed-set = <0>;
- amd,serdes-blwc = <1>, <1>, <0>;
- amd,serdes-cdr-rate = <2>, <2>, <7>;
- amd,serdes-pq-skew = <10>, <10>, <30>;
- amd,serdes-tx-amp = <15>, <15>, <10>;
- amd,serdes-dfe-tap-config = <3>, <3>, <1>;
- amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
- };
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index 26efd526d16c..4bb624a73b54 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -5,12 +5,16 @@ Required properties:
- reg: Address and length of the register sets for the device
- MAC registers
- PCS registers
+ - SerDes Rx/Tx registers
+ - SerDes integration registers (1/2)
+ - SerDes integration registers (2/2)
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
- interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
listed is required and is the general device interrupt. If the optional
amd,per-channel-interrupt property is specified, then one additional
- interrupt for each DMA channel supported by the device should be specified
+ interrupt for each DMA channel supported by the device should be specified.
+ The last interrupt listed should be the PCS auto-negotiation interrupt.
- clocks:
- DMA clock for the amd-xgbe device (used for calculating the
correct Rx interrupt watchdog timer value on a DMA channel
@@ -19,7 +23,6 @@ Required properties:
- clock-names: Should be the names of the clocks
- "dma_clk" for the DMA clock
- "ptp_clk" for the PTP clock
-- phy-handle: See ethernet.txt file in the same directory
- phy-mode: See ethernet.txt file in the same directory
Optional properties:
@@ -29,19 +32,46 @@ Optional properties:
- amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate
a unique interrupt for each DMA channel - this requires an additional
interrupt be configured for each DMA channel
+- amd,speed-set: Speed capabilities of the device
+ 0 - 1GbE and 10GbE (default)
+ 1 - 2.5GbE and 10GbE
+
+The following optional properties are represented by an array with each
+value corresponding to a particular speed. The first array value represents
+the setting for the 1GbE speed, the second value for the 2.5GbE speed and
+the third value for the 10GbE speed. All three values are required if the
+property is used.
+- amd,serdes-blwc: Baseline wandering correction enablement
+ 0 - Off
+ 1 - On
+- amd,serdes-cdr-rate: CDR rate speed selection
+- amd,serdes-pq-skew: PQ (data sampling) skew
+- amd,serdes-tx-amp: TX amplitude boost
+- amd,serdes-dfe-tap-config: DFE taps available to run
+- amd,serdes-dfe-tap-enable: DFE taps to enable
Example:
xgbe@e0700000 {
compatible = "amd,xgbe-seattle-v1a";
reg = <0 0xe0700000 0 0x80000>,
- <0 0xe0780000 0 0x80000>;
+ <0 0xe0780000 0 0x80000>,
+ <0 0xe1240800 0 0x00400>,
+ <0 0xe1250000 0 0x00060>,
+ <0 0xe1250080 0 0x00004>;
interrupt-parent = <&gic>;
interrupts = <0 325 4>,
- <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>;
+ <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>,
+ <0 323 4>;
amd,per-channel-interrupt;
clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
clock-names = "dma_clk", "ptp_clk";
- phy-handle = <&phy>;
phy-mode = "xgmii";
mac-address = [ 02 a1 a2 a3 a4 a5 ];
+ amd,speed-set = <0>;
+ amd,serdes-blwc = <1>, <1>, <0>;
+ amd,serdes-cdr-rate = <2>, <2>, <7>;
+ amd,serdes-pq-skew = <10>, <10>, <30>;
+ amd,serdes-tx-amp = <15>, <15>, <10>;
+ amd,serdes-dfe-tap-config = <3>, <3>, <1>;
+ amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
};
diff --git a/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt b/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt
new file mode 100644
index 000000000000..6d7ab4e524d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt
@@ -0,0 +1,35 @@
+* IPQ806x DWMAC Ethernet controller
+
+The device inherits all the properties of the dwmac/stmmac devices
+described in the file net/stmmac.txt with the following changes.
+
+Required properties:
+
+- compatible: should be "qcom,ipq806x-gmac" along with "snps,dwmac"
+ and any applicable more detailed version number
+ described in net/stmmac.txt
+
+- qcom,nss-common: should contain a phandle to a syscon device mapping the
+ nss-common registers.
+
+- qcom,qsgmii-csr: should contain a phandle to a syscon device mapping the
+ qsgmii-csr registers.
+
+Example:
+
+ gmac: ethernet@37000000 {
+ device_type = "network";
+ compatible = "qcom,ipq806x-gmac";
+ reg = <0x37000000 0x200000>;
+ interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+
+ qcom,nss-common = <&nss_common>;
+ qcom,qsgmii-csr = <&qsgmii_csr>;
+
+ clocks = <&gcc GMAC_CORE1_CLK>;
+ clock-names = "stmmaceth";
+
+ resets = <&gcc GMAC_CORE1_RESET>;
+ reset-names = "stmmaceth";
+ };
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index ba19d671e808..8ec5fdf444e9 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -9,6 +9,7 @@ Required properties:
the Cadence GEM, or the generic form: "cdns,gem".
Use "cdns,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
Use "cdns,sama5d4-gem" for the Gigabit IP available on Atmel sama5d4 SoCs.
+ Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
- reg: Address and length of the register set for the device
- interrupts: Should contain macb interrupt
- phy-mode: See ethernet.txt file in the same directory.
diff --git a/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt b/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt
new file mode 100644
index 000000000000..7edba1264f6f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt
@@ -0,0 +1,20 @@
+* NXP LPC1850 GMAC ethernet controller
+
+This device is a platform glue layer for stmmac.
+Please see stmmac.txt for the other unchanged properties.
+
+Required properties:
+ - compatible: Should contain "nxp,lpc1850-dwmac"
+
+Examples:
+
+mac: ethernet@40010000 {
+ compatible = "nxp,lpc1850-dwmac", "snps,dwmac-3.611", "snps,dwmac";
+ reg = <0x40010000 0x2000>;
+ interrupts = <5>;
+ interrupt-names = "macirq";
+ clocks = <&ccu1 CLK_CPU_ETHERNET>;
+ clock-names = "stmmaceth";
+ resets = <&rgu 22>;
+ reset-names = "stmmaceth";
+};
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index 40831fbaff72..525e1658f2da 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -30,6 +30,9 @@ Optional Properties:
- max-speed: Maximum PHY supported speed (10, 100, 1000...)
+- broken-turn-around: If set, indicates the PHY device does not correctly
+ release the turn around line low at the end of an MDIO transaction.
+
Example:
ethernet-phy@0 {
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
new file mode 100644
index 000000000000..1fd8831437bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -0,0 +1,48 @@
+* Renesas Electronics Ethernet AVB
+
+This file provides information on what the device node for the Ethernet AVB
+interface contains.
+
+Required properties:
+- compatible: "renesas,etheravb-r8a7790" if the device is a part of R8A7790 SoC.
+ "renesas,etheravb-r8a7794" if the device is a part of R8A7794 SoC.
+- reg: offset and length of (1) the register block and (2) the stream buffer.
+- interrupts: interrupt specifier for the sole interrupt.
+- phy-mode: see ethernet.txt file in the same directory.
+- phy-handle: see ethernet.txt file in the same directory.
+- #address-cells: number of address cells for the MDIO bus, must be equal to 1.
+- #size-cells: number of size cells on the MDIO bus, must be equal to 0.
+- clocks: clock phandle and specifier pair.
+- pinctrl-0: phandle, referring to a default pin configuration node.
+
+Optional properties:
+- interrupt-parent: the phandle for the interrupt controller that services
+ interrupts for this device.
+- pinctrl-names: pin configuration state name ("default").
+- renesas,no-ether-link: boolean, specify when a board does not provide a proper
+ AVB_LINK signal.
+- renesas,ether-link-active-low: boolean, specify when the AVB_LINK signal is
+ active-low instead of normal active-high.
+
+Example:
+
+ ethernet@e6800000 {
+ compatible = "renesas,etheravb-r8a7790";
+ reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7790_CLK_ETHERAVB>;
+ phy-mode = "rmii";
+ phy-handle = <&phy0>;
+ pinctrl-0 = <&ether_pins>;
+ pinctrl-names = "default";
+ renesas,no-ether-link;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
new file mode 100644
index 000000000000..58d935b58598
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt
@@ -0,0 +1,25 @@
+* Texas Instruments - dp83867 Gigabit ethernet phy
+
+Required properties:
+ - reg - The ID number for the phy, usually a small integer
+ - ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
+ for applicable values
+ - ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
+ for applicable values
+ - ti,fifo-depth - Transmit FIFO depth - see dt-bindings/net/ti-dp83867.h
+ for applicable values
+
+Default child nodes are standard Ethernet PHY device
+nodes as described in Documentation/devicetree/bindings/net/phy.txt
+
+Example:
+
+ ethernet-phy@0 {
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_75_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ };
+
+Datasheet can be found:
+http://www.ti.com/product/DP83867IR/datasheet
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 83bf4986baea..334b49ef02d1 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -51,6 +51,7 @@ Table of Contents
3.4 Configuring Bonding Manually via Sysfs
3.5 Configuration with Interfaces Support
3.6 Overriding Configuration for Special Cases
+3.7 Configuring LACP for 802.3ad mode in a more secure way
4. Querying Bonding Configuration
4.1 Bonding Configuration
@@ -178,6 +179,27 @@ active_slave
active slave, or the empty string if there is no active slave or
the current mode does not use an active slave.
+ad_actor_sys_prio
+
+ In an AD system, this specifies the system priority. The allowed range
+ is 1 - 65535. If the value is not specified, it takes 65535 as the
+ default value.
+
+ This parameter has effect only in 802.3ad mode and is available through
+ the SysFs interface.
+
+ad_actor_system
+
+ In an AD system, this specifies the mac-address for the actor in
+ protocol packet exchanges (LACPDUs). The value cannot be NULL or
+ multicast. It is preferred to have the local-admin bit set for this
+ mac, but the driver does not enforce it. If the value is not given, the
+ system defaults to using the master's mac address as the actor's system
+ address.
+
+ This parameter has effect only in 802.3ad mode and is available through
+ the SysFs interface.
+
ad_select
Specifies the 802.3ad aggregation selection logic to use. The
@@ -220,6 +242,21 @@ ad_select
This option was added in bonding version 3.4.0.
+ad_user_port_key
+
+ In an AD system, the port-key has three parts as shown below -
+
+ Bits Use
+ 00 Duplex
+ 01-05 Speed
+ 06-15 User-defined
+
+ This defines the upper 10 bits of the port key. The values can be
+ from 0 - 1023. If not given, the system defaults to 0.
+
+ This parameter has effect only in 802.3ad mode and is available through
+ the SysFs interface.
+
all_slaves_active
Specifies that duplicate frames (received on inactive ports) should be
@@ -1622,6 +1659,53 @@ output port selection.
This feature first appeared in bonding driver version 3.7.0 and support for
output slave selection was limited to round-robin and active-backup modes.
+3.7 Configuring LACP for 802.3ad mode in a more secure way
+----------------------------------------------------------
+
+When using 802.3ad bonding mode, the Actor (host) and Partner (switch)
+exchange LACPDUs. These LACPDUs cannot be sniffed, because they are
+destined to link local mac addresses (which switches/bridges are not
+supposed to forward). However, most of the values are easily predictable
+or are simply the machine's MAC address (which is trivially known to all
+other hosts in the same L2). This implies that other machines in the L2
+domain can spoof LACPDU packets from other hosts to the switch and potentially
+cause mayhem by joining (from the point of view of the switch) another
+machine's aggregate, thus receiving a portion of that host's incoming
+traffic and / or spoofing traffic from that machine themselves (potentially
+even successfully terminating some portion of flows). Though this is not
+a likely scenario, one could avoid this possibility by simply configuring
+a few bonding parameters:
+
+ (a) ad_actor_system : You can set a random mac-address that can be used for
+ these LACPDU exchanges. The value cannot be NULL or multicast. Also
+ it's preferable to set the local-admin bit. The following shell code
+ generates a random mac-address as described above.
+
+ # sys_mac_addr=$(printf '%02x:%02x:%02x:%02x:%02x:%02x' \
+ $(( (RANDOM & 0xFE) | 0x02 )) \
+ $(( RANDOM & 0xFF )) \
+ $(( RANDOM & 0xFF )) \
+ $(( RANDOM & 0xFF )) \
+ $(( RANDOM & 0xFF )) \
+ $(( RANDOM & 0xFF )))
+ # echo $sys_mac_addr > /sys/class/net/bond0/bonding/ad_actor_system
+
+ (b) ad_actor_sys_prio : Randomize the system priority. The default value
+ is 65535, but the system can take any value from 1 - 65535. The
+ following shell code generates a random priority and sets it.
+
+ # sys_prio=$(( 1 + RANDOM + RANDOM ))
+ # echo $sys_prio > /sys/class/net/bond0/bonding/ad_actor_sys_prio
+
+ (c) ad_user_port_key : Use the user portion of the port-key. The default
+ keeps this empty. These are the upper 10 bits of the port-key and the
+ value ranges from 0 - 1023. The following shell code generates these 10
+ bits and sets them.
+
+ # usr_port_key=$(( RANDOM & 0x3FF ))
+ # echo $usr_port_key > /sys/class/net/bond0/bonding/ad_user_port_key
+
+
4 Querying Bonding Configuration
=================================
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 5abad1e921ca..b48d4a149411 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -268,6 +268,9 @@ solution for a couple of reasons:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* frame payload length in byte (0 .. 8) */
+ __u8 __pad; /* padding */
+ __u8 __res0; /* reserved / padding */
+ __u8 __res1; /* reserved / padding */
__u8 data[8] __attribute__((aligned(8)));
};
diff --git a/Documentation/networking/dctcp.txt b/Documentation/networking/dctcp.txt
index 0d5dfbc89ec9..13a857753208 100644
--- a/Documentation/networking/dctcp.txt
+++ b/Documentation/networking/dctcp.txt
@@ -8,6 +8,7 @@ the data center network to provide multi-bit feedback to the end hosts.
To enable it on end hosts:
sysctl -w net.ipv4.tcp_congestion_control=dctcp
+ sysctl -w net.ipv4.tcp_ecn_fallback=0 (optional)
All switches in the data center network running DCTCP must support ECN
marking and be configured for marking when reaching defined switch buffer
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index 22bbc7225f8e..1700756af057 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -30,8 +30,8 @@ int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0);
The address family, socket addresses etc. are defined in the
include/net/af_ieee802154.h header or in the special header
-in our userspace package (see either linux-zigbee sourceforge download page
-or git tree at git://linux-zigbee.git.sourceforge.net/gitroot/linux-zigbee).
+in the userspace package (see either http://wpan.cakelab.org/ or the
+git tree at https://github.com/linux-wpan/wpan-tools).
One can use SOCK_RAW for passing raw data towards device xmit function. YMMV.
@@ -49,15 +49,6 @@ Like with WiFi, there are several types of devices implementing IEEE 802.15.4.
Those types of devices require different approach to be hooked into Linux kernel.
-MLME - MAC Level Management
-============================
-
-Most of IEEE 802.15.4 MLME interfaces are directly mapped on netlink commands.
-See the include/net/nl802154.h header. Our userspace tools package
-(see above) provides CLI configuration utility for radio interfaces and simple
-coordinator for IEEE 802.15.4 networks as an example users of MLME protocol.
-
-
HardMAC
=======
@@ -75,8 +66,6 @@ net_device with a pointer to struct ieee802154_mlme_ops instance. The fields
assoc_req, assoc_resp, disassoc_req, start_req, and scan_req are optional.
All other fields are required.
-We provide an example of simple HardMAC driver at drivers/ieee802154/fakehard.c
-
SoftMAC
=======
@@ -89,7 +78,8 @@ stack interface for network sniffers (e.g. WireShark).
This layer is going to be extended soon.
-See header include/net/mac802154.h and several drivers in drivers/ieee802154/.
+See header include/net/mac802154.h and several drivers in
+drivers/net/ieee802154/.
Device drivers API
@@ -114,18 +104,17 @@ Moreover IEEE 802.15.4 device operations structure should be filled.
Fake drivers
============
-In addition there are two drivers available which simulate real devices with
-HardMAC (fakehard) and SoftMAC (fakelb - IEEE 802.15.4 loopback driver)
-interfaces. This option provides possibility to test and debug stack without
-usage of real hardware.
+In addition there is a driver available which simulates a real device with
+a SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option
+provides the possibility to test and debug the stack without real hardware.
-See sources in drivers/ieee802154 folder for more details.
+See sources in drivers/net/ieee802154 folder for more details.
6LoWPAN Linux implementation
============================
-The IEEE 802.15.4 standard specifies an MTU of 128 bytes, yielding about 80
+The IEEE 802.15.4 standard specifies an MTU of 127 bytes, yielding about 80
octets of actual MAC payload once security is turned on, on a wireless link
with a link throughput of 250 kbps or less. The 6LoWPAN adaptation format
[RFC4944] was specified to carry IPv6 datagrams over such constrained links,
@@ -140,7 +129,8 @@ In Semptember 2011 the standard update was published - [RFC6282].
It deprecates HC1 and HC2 compression and defines IPHC encoding format which is
used in this Linux implementation.
-All the code related to 6lowpan you may find in files: net/ieee802154/6lowpan.*
+All the code related to 6lowpan can be found in net/6lowpan/*
+and net/ieee802154/6lowpan/*
To setup 6lowpan interface you need (busybox release > 1.17.0):
1. Add IEEE802.15.4 interface and initialize PANid;
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 071fb18dc57c..5fae7704daab 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -267,6 +267,15 @@ tcp_ecn - INTEGER
but do not request ECN on outgoing connections.
Default: 2
+tcp_ecn_fallback - BOOLEAN
+ If the kernel detects that an ECN connection misbehaves, enable
+ falling back to non-ECN. Currently, this knob implements the
+ fallback from RFC 3168, section 6.1.1.1, though additional
+ detection mechanisms may be implemented under this knob in the
+ future. The value is not used if tcp_ecn or per-route (or
+ congestion control) ECN settings are disabled.
+ Default: 1 (fallback enabled)
+
tcp_fack - BOOLEAN
Enable FACK congestion avoidance and fast retransmission.
The value is not used, if tcp_sack is not enabled.
@@ -742,8 +751,10 @@ IP Variables:
ip_local_port_range - 2 INTEGERS
Defines the local port range that is used by TCP and UDP to
choose the local port. The first number is the first, the
- second the last local port number. The default values are
- 32768 and 61000 respectively.
+ second the last local port number.
+ If possible, it is better for these numbers to have different parity
+ (one even and one odd value).
+ The default values are 32768 and 60999 respectively.
ip_local_reserved_ports - list of comma separated ranges
Specify the ports which are reserved for known third-party
@@ -766,7 +777,7 @@ ip_local_reserved_ports - list of comma separated ranges
ip_local_port_range, e.g.:
$ cat /proc/sys/net/ipv4/ip_local_port_range
- 32000 61000
+ 32000 60999
$ cat /proc/sys/net/ipv4/ip_local_reserved_ports
8080,9148
@@ -1213,6 +1224,14 @@ auto_flowlabels - BOOLEAN
FALSE: disabled
Default: false
+flowlabel_state_ranges - BOOLEAN
+ Split the flow label number space into two ranges. 0-0x7FFFF is
+ reserved for the IPv6 flow manager facility, 0x80000-0xFFFFF
+ is reserved for stateless flow labels as described in RFC6437.
+ TRUE: enabled
+ FALSE: disabled
+ Default: true
+
anycast_src_echo_reply - BOOLEAN
Controls the use of anycast addresses as source addresses for ICMPv6
echo reply
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index 0344f1d45b37..f4be85e96005 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -1,6 +1,6 @@
- HOWTO for the linux packet generator
+ HOWTO for the linux packet generator
------------------------------------
Enable CONFIG_NET_PKTGEN to compile and build pktgen either in-kernel
@@ -50,17 +50,33 @@ For ixgbe use e.g. "30" resulting in approx 33K interrupts/sec (1/30*10^6):
# ethtool -C ethX rx-usecs 30
-Viewing threads
-===============
-/proc/net/pktgen/kpktgend_0
-Name: kpktgend_0 max_before_softirq: 10000
-Running:
-Stopped: eth1
-Result: OK: max_before_softirq=10000
+Kernel threads
+==============
+Pktgen creates a thread for each CPU with affinity to that CPU. Each
+thread is controlled through the procfile /proc/net/pktgen/kpktgend_X.
+
+Example: /proc/net/pktgen/kpktgend_0
+
+ Running:
+ Stopped: eth4@0
+ Result: OK: add_device=eth4@0
+
+Most important are the devices assigned to the thread.
+
+The two basic thread commands are:
+ * add_device DEVICE@NAME -- adds a single device
+ * rem_device_all -- remove all associated devices
+
+When adding a device to a thread, a corresponding procfile is created
+which is used for configuring this device. Thus, device names need to
+be unique.
-Most important are the devices assigned to the thread. Note that a
-device can only belong to one thread.
+To support adding the same device to multiple threads, which is useful
+with multi queue NICs, the device naming scheme is extended with "@":
+ device@something
+The part after "@" can be anything, but it is customary to use the thread
+number.
Viewing devices
===============
@@ -69,29 +85,32 @@ The Params section holds configured information. The Current section
holds running statistics. The Result is printed after a run or after
interruption. Example:
-/proc/net/pktgen/eth1
+/proc/net/pktgen/eth4@0
-Params: count 10000000 min_pkt_size: 60 max_pkt_size: 60
- frags: 0 delay: 0 clone_skb: 1000000 ifname: eth1
+ Params: count 100000 min_pkt_size: 60 max_pkt_size: 60
+ frags: 0 delay: 0 clone_skb: 64 ifname: eth4@0
flows: 0 flowlen: 0
- dst_min: 10.10.11.2 dst_max:
- src_min: src_max:
- src_mac: 00:00:00:00:00:00 dst_mac: 00:04:23:AC:FD:82
- udp_src_min: 9 udp_src_max: 9 udp_dst_min: 9 udp_dst_max: 9
- src_mac_count: 0 dst_mac_count: 0
- Flags:
-Current:
- pkts-sofar: 10000000 errors: 39664
- started: 1103053986245187us stopped: 1103053999346329us idle: 880401us
- seq_num: 10000011 cur_dst_mac_offset: 0 cur_src_mac_offset: 0
- cur_saddr: 0x10a0a0a cur_daddr: 0x20b0a0a
- cur_udp_dst: 9 cur_udp_src: 9
+ queue_map_min: 0 queue_map_max: 0
+ dst_min: 192.168.81.2 dst_max:
+ src_min: src_max:
+ src_mac: 90:e2:ba:0a:56:b4 dst_mac: 00:1b:21:3c:9d:f8
+ udp_src_min: 9 udp_src_max: 109 udp_dst_min: 9 udp_dst_max: 9
+ src_mac_count: 0 dst_mac_count: 0
+ Flags: UDPSRC_RND NO_TIMESTAMP QUEUE_MAP_CPU
+ Current:
+ pkts-sofar: 100000 errors: 0
+ started: 623913381008us stopped: 623913396439us idle: 25us
+ seq_num: 100001 cur_dst_mac_offset: 0 cur_src_mac_offset: 0
+ cur_saddr: 192.168.8.3 cur_daddr: 192.168.81.2
+ cur_udp_dst: 9 cur_udp_src: 42
+ cur_queue_map: 0
flows: 0
-Result: OK: 13101142(c12220741+d880401) usec, 10000000 (60byte,0frags)
- 763292pps 390Mb/sec (390805504bps) errors: 39664
+ Result: OK: 15430(c15405+d25) usec, 100000 (60byte,0frags)
+ 6480562pps 3110Mb/sec (3110669760bps) errors: 0
-Configuring threads and devices
-================================
+
+Configuring devices
+===================
This is done via the /proc interface, and most easily done via pgset
as defined in the sample scripts.
@@ -126,7 +145,7 @@ Examples:
To select queue 1 of a given device,
use queue_map_min=1 and queue_map_max=1
- pgset "src_mac_count 1" Sets the number of MACs we'll range through.
+ pgset "src_mac_count 1" Sets the number of MACs we'll range through.
The 'minimum' MAC is what you set with srcmac.
pgset "dst_mac_count 1" Sets the number of MACs we'll range through.
@@ -145,6 +164,7 @@ Examples:
UDPCSUM,
IPSEC # IPsec encapsulation (needs CONFIG_XFRM)
NODE_ALLOC # node specific memory allocation
+ NO_TIMESTAMP # disable timestamping
pgset spi SPI_VALUE Set specific SA used to transform packet.
@@ -192,24 +212,43 @@ Examples:
pgset "rate 300M" set rate to 300 Mb/s
pgset "ratep 1000000" set rate to 1Mpps
+ pgset "xmit_mode netif_receive" RX inject into stack netif_receive_skb()
+ Works with "burst" but not with "clone_skb".
+ Default xmit_mode is "start_xmit".
+
Sample scripts
==============
-A collection of small tutorial scripts for pktgen is in the
-samples/pktgen directory:
+A collection of tutorial scripts and helpers for pktgen is in the
+samples/pktgen directory. The helper parameters.sh file supports easy
+and consistent parameter parsing across the sample scripts.
+
+Usage example and help:
+ ./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2
+
+Usage: ./pktgen_sample01_simple.sh [-vx] -i ethX
+ -i : ($DEV) output interface/device (required)
+ -s : ($PKT_SIZE) packet size
+ -d : ($DEST_IP) destination IP
+ -m : ($DST_MAC) destination MAC-addr
+ -t : ($THREADS) threads to start
+ -c : ($SKB_CLONE) SKB clones send before alloc new SKB
+ -b : ($BURST) HW level bursting of SKBs
+ -v : ($VERBOSE) verbose
+ -x : ($DEBUG) debug
+
+The global variables being set are also listed. E.g. the required
+interface/device parameter "-i" sets variable $DEV. Copy the
+pktgen_sampleXX scripts and modify them to fit your own needs.
+
+The old scripts:
-pktgen.conf-1-1 # 1 CPU 1 dev
pktgen.conf-1-2 # 1 CPU 2 dev
-pktgen.conf-2-1 # 2 CPU's 1 dev
-pktgen.conf-2-2 # 2 CPU's 2 dev
pktgen.conf-1-1-rdos # 1 CPU 1 dev w. route DoS
pktgen.conf-1-1-ip6 # 1 CPU 1 dev ipv6
pktgen.conf-1-1-ip6-rdos # 1 CPU 1 dev ipv6 w. route DoS
pktgen.conf-1-1-flows # 1 CPU 1 dev multiple flows.
-Run in shell: ./pktgen.conf-X-Y
-This does all the setup including sending.
-
Interrupt affinity
===================
@@ -217,6 +256,9 @@ Note that when adding devices to a specific CPU it is a good idea to
also assign /proc/irq/XX/smp_affinity so that the TX interrupts are bound
to the same CPU. This reduces cache bouncing when freeing skbs.
+Also consider the device flag QUEUE_MAP_CPU, which maps the SKB's TX queue
+to the running thread's CPU (directly from smp_processor_id()).
+
Enable IPsec
============
Default IPsec transformation with ESP encapsulation plus transport mode
@@ -237,18 +279,19 @@ Current commands and configuration options
start
stop
+reset
** Thread commands:
add_device
rem_device_all
-max_before_softirq
** Device commands:
count
clone_skb
+burst
debug
frags
@@ -257,10 +300,17 @@ delay
src_mac_count
dst_mac_count
-pkt_size
+pkt_size
min_pkt_size
max_pkt_size
+queue_map_min
+queue_map_max
+skb_priority
+
+tos (ipv4)
+traffic_class (ipv6)
+
mpls
udp_src_min
@@ -269,6 +319,8 @@ udp_src_max
udp_dst_min
udp_dst_max
+node
+
flag
IPSRC_RND
IPDST_RND
@@ -287,6 +339,9 @@ flag
UDPCSUM
IPSEC
NODE_ALLOC
+ NO_TIMESTAMP
+
+spi (ipsec)
dst_min
dst_max
@@ -299,8 +354,10 @@ src_mac
clear_counters
-dst6
src6
+dst6
+dst6_max
+dst6_min
flows
flowlen
@@ -308,6 +365,17 @@ flowlen
rate
ratep
+xmit_mode <start_xmit|netif_receive>
+
+vlan_cfi
+vlan_id
+vlan_p
+
+svlan_cfi
+svlan_id
+svlan_p
+
+
References:
ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/
ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/examples/
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index f981a9295a39..da82cd75a4f6 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -1,59 +1,367 @@
-Switch (and switch-ish) device drivers HOWTO
-===========================
-
-Please note that the word "switch" is here used in very generic meaning.
-This include devices supporting L2/L3 but also various flow offloading chips,
-including switches embedded into SR-IOV NICs.
-
-Lets describe a topology a bit. Imagine the following example:
-
- +----------------------------+ +---------------+
- | SOME switch chip | | CPU |
- +----------------------------+ +---------------+
- port1 port2 port3 port4 MNGMNT | PCI-E |
- | | | | | +---------------+
- PHY PHY | | | | NIC0 NIC1
- | | | | | |
- | | +- PCI-E -+ | |
- | +------- MII -------+ |
- +------------- MII ------------+
-
-In this example, there are two independent lines between the switch silicon
-and CPU. NIC0 and NIC1 drivers are not aware of a switch presence. They are
-separate from the switch driver. SOME switch chip is by managed by a driver
-via PCI-E device MNGMNT. Note that MNGMNT device, NIC0 and NIC1 may be
-connected to some other type of bus.
-
-Now, for the previous example show the representation in kernel:
-
- +----------------------------+ +---------------+
- | SOME switch chip | | CPU |
- +----------------------------+ +---------------+
- sw0p0 sw0p1 sw0p2 sw0p3 MNGMNT | PCI-E |
- | | | | | +---------------+
- PHY PHY | | | | eth0 eth1
- | | | | | |
- | | +- PCI-E -+ | |
- | +------- MII -------+ |
- +------------- MII ------------+
-
-Lets call the example switch driver for SOME switch chip "SOMEswitch". This
-driver takes care of PCI-E device MNGMNT. There is a netdevice instance sw0pX
-created for each port of a switch. These netdevices are instances
-of "SOMEswitch" driver. sw0pX netdevices serve as a "representation"
-of the switch chip. eth0 and eth1 are instances of some other existing driver.
-
-The only difference of the switch-port netdevice from the ordinary netdevice
-is that is implements couple more NDOs:
-
- ndo_switch_parent_id_get - This returns the same ID for two port netdevices
- of the same physical switch chip. This is
- mandatory to be implemented by all switch drivers
- and serves the caller for recognition of a port
- netdevice.
- ndo_switch_parent_* - Functions that serve for a manipulation of the switch
- chip itself (it can be though of as a "parent" of the
- port, therefore the name). They are not port-specific.
- Caller might use arbitrary port netdevice of the same
- switch and it will make no difference.
- ndo_switch_port_* - Functions that serve for a port-specific manipulation.
+Ethernet switch device driver model (switchdev)
+===============================================
+Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
+
+
+The Ethernet switch device driver model (switchdev) is an in-kernel driver
+model for switch devices which offload the forwarding (data) plane from the
+kernel.
+
+Figure 1 is a block diagram showing the components of the switchdev model for
+an example setup using a data-center-class switch ASIC chip. Other setups
+with SR-IOV or soft switches, such as OVS, are possible.
+
+
+                             User-space tools                                 
+                                                                              
+       user space                   |                                         
+      +-------------------------------------------------------------------+   
+       kernel                       | Netlink                                 
+                                    |                                         
+                     +--------------+-------------------------------+         
+                     |         Network stack                        |         
+                     |           (Linux)                            |         
+                     |                                              |         
+                     +----------------------------------------------+         
+                                                                              
+                               sw1p2 sw1p4 sw1p6
+                      sw1p1  + sw1p3 +  sw1p5 +         eth1             
+                        +    |    +    |    +    |            +               
+                        |    |    |    |    |    |            |               
+                     +--+----+----+----+-+--+----+---+  +-----+-----+         
+                     |         Switch driver         |  |    mgmt   |         
+                     |        (this document)        |  |   driver  |         
+                     |                               |  |           |         
+                     +--------------+----------------+  +-----------+         
+                                    |                                         
+       kernel                       | HW bus (eg PCI)                         
+      +-------------------------------------------------------------------+   
+       hardware                     |                                         
+                     +--------------+---+------------+                        
+                     |         Switch device (sw1)   |                        
+                     |  +----+                       +--------+               
+                     |  |    v offloaded data path   | mgmt port              
+                     |  |    |                       |                        
+                     +--|----|----+----+----+----+---+                        
+                        |    |    |    |    |    |                            
+                        +    +    +    +    +    +                            
+                       p1   p2   p3   p4   p5   p6
+                                       
+                             front-panel ports                                
+                                                                              
+
+ Fig 1.
+
+
+Include Files
+-------------
+
+#include <linux/netdevice.h>
+#include <net/switchdev.h>
+
+
+Configuration
+-------------
+
+Use "depends NET_SWITCHDEV" in driver's Kconfig to ensure switchdev model
+support is built for driver.
+
+
+Switch Ports
+------------
+
+On switchdev driver initialization, the driver will allocate and register a
+struct net_device (using register_netdev()) for each enumerated physical switch
+port, called the port netdev. A port netdev is the software representation of
+the physical port and provides a conduit for control traffic to/from the
+controller (the kernel) and the network, as well as an anchor point for higher
+level constructs such as bridges, bonds, VLANs, tunnels, and L3 routers. Using
+standard netdev tools (iproute2, ethtool, etc), the port netdev can also
+provide to the user access to the physical properties of the switch port such
+as PHY link state and I/O statistics.
+
+There is (currently) no higher-level kernel object for the switch beyond the
+port netdevs. All of the switchdev driver ops are netdev ops or switchdev ops.
+
+A switch management port is outside the scope of the switchdev driver model.
+Typically, the management port does not participate in the offloaded data plane and
+is loaded with a different driver, such as a NIC driver, on the management port
+device.
+
+Port Netdev Naming
+^^^^^^^^^^^^^^^^^^
+
+Udev rules should be used for port netdev naming, using some unique attribute
+of the port as a key, for example the port MAC address or the port PHYS name.
+Hard-coding of kernel netdev names within the driver is discouraged; let the
+kernel pick the default netdev name, and let udev set the final name based on a
+port attribute.
+
+Using port PHYS name (ndo_get_phys_port_name) for the key is particularly
+useful for dynamically-named ports where the device names its ports based on
+external configuration. For example, if a physical 40G port is split logically
+into 4 10G ports, resulting in 4 port netdevs, the device can give a unique
+name for each port using port PHYS name. The udev rule would be:
+
+SUBSYSTEM=="net", ACTION=="add", DRIVER="<driver>", ATTR{phys_port_name}!="", \
+ NAME="$attr{phys_port_name}"
+
+Suggested naming convention is "swXpYsZ", where X is the switch name or ID, Y
+is the port name or ID, and Z is the sub-port name or ID. For example, sw1p1s0
+would be sub-port 0 on port 1 on switch 1.
+
+Switch ID
+^^^^^^^^^
+
+The switchdev driver must implement the switchdev op switchdev_port_attr_get
+for SWITCHDEV_ATTR_PORT_PARENT_ID for each port netdev, returning the same
+physical ID for each port of a switch. The ID must be unique between switches
+on the same system. The ID does not need to be unique between switches on
+different systems.
+
+The switch ID is used to locate ports on a switch and to know if aggregated
+ports belong to the same switch.
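+
+As a minimal sketch, assuming a hypothetical "foo" driver that keeps a
+hardware switch ID in its private data (none of the foo_* names below are
+from an in-tree driver), the op could look like:
+
+	static int foo_port_attr_get(struct net_device *dev,
+				     struct switchdev_attr *attr)
+	{
+		struct foo_port *foo_port = netdev_priv(dev);
+
+		switch (attr->id) {
+		case SWITCHDEV_ATTR_PORT_PARENT_ID:
+			attr->u.ppid.id_len = sizeof(foo_port->foo->hw_id);
+			memcpy(&attr->u.ppid.id, &foo_port->foo->hw_id,
+			       attr->u.ppid.id_len);
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+
+		return 0;
+	}
+
+The op is hooked up through the port netdev's struct switchdev_ops.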
+
+Port Features
+^^^^^^^^^^^^^
+
+NETIF_F_NETNS_LOCAL
+
+If the switchdev driver (and device) only supports offloading of the default
+network namespace (netns), the driver should set this feature flag to prevent
+the port netdev from being moved out of the default netns. A netns-aware
+driver/device would not set this flag and be responsible for partitioning
+hardware to preserve netns containment. This means hardware cannot forward
+traffic from a port in one namespace to another port in another namespace.
+
+Port Topology
+^^^^^^^^^^^^^
+
+The port netdevs representing the physical switch ports can be organized into
+higher-level switching constructs. The default construct is a standalone
+router port, used to offload L3 forwarding. Two or more ports can be bonded
+together to form a LAG. Two or more ports (or LAGs) can be bridged to bridge
+L2 networks. VLANs can be applied to sub-divide L2 networks. L2-over-L3
+tunnels can be built on ports. These constructs are built using standard Linux
+tools such as the bridge driver, the bonding/team drivers, and netlink-based
+tools such as iproute2.
+
+The switchdev driver can know a particular port's position in the topology by
+monitoring NETDEV_CHANGEUPPER notifications. For example, a port moved into a
+bond will see its upper master change. If that bond is moved into a bridge,
+the bond's upper master will change. And so on. The driver will track such
+movements to know a port's position in the overall topology by registering
+for netdevice events and acting on NETDEV_CHANGEUPPER.
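+
+A sketch of such a notifier, modeled on what the rocker driver does (the
+foo_* helpers are hypothetical; a real driver also tracks whether the port
+was previously bridged before calling the leave helper):
+
+	static int foo_netdevice_event(struct notifier_block *unused,
+				       unsigned long event, void *ptr)
+	{
+		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+		struct net_device *master;
+
+		if (event != NETDEV_CHANGEUPPER || !foo_port_dev_check(dev))
+			return NOTIFY_DONE;
+
+		master = netdev_master_upper_dev_get(dev);
+		if (master && master->rtnl_link_ops &&
+		    !strcmp(master->rtnl_link_ops->kind, "bridge"))
+			foo_port_bridge_join(dev, master);
+		else
+			foo_port_bridge_leave(dev);
+
+		return NOTIFY_DONE;
+	}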
+
+L2 Forwarding Offload
+---------------------
+
+The idea is to offload the L2 data forwarding (switching) path from the kernel
+to the switchdev device by mirroring bridge FDB entries down to the device. An
+FDB entry is the {port, MAC, VLAN} tuple forwarding destination.
+
+To offload L2 bridging, the switchdev driver/device should support:
+
+ - Static FDB entries installed on a bridge port
+ - Notification of learned/forgotten src mac/vlans from device
+ - STP state changes on the port
+ - VLAN flooding of multicast/broadcast and unknown unicast packets
+
+Static FDB Entries
+^^^^^^^^^^^^^^^^^^
+
+The switchdev driver should implement ndo_fdb_add, ndo_fdb_del and ndo_fdb_dump
+to support static FDB entries installed to the device. Static bridge FDB
+entries are installed, for example, using iproute2 bridge cmd:
+
+ bridge fdb add ADDR dev DEV [vlan VID] [self]
+
+The driver should use the helper switchdev_port_fdb_xxx ops for ndo_fdb_xxx
+ops, and handle add/delete/dump of SWITCHDEV_OBJ_PORT_FDB object using
+switchdev_port_obj_xxx ops.
+
+XXX: what should be done if offloading this rule to hardware fails (for
+example, due to full capacity in hardware tables)?
+
+Note: by default, the bridge does not filter on VLAN and only bridges untagged
+traffic. To enable VLAN support, turn on VLAN filtering:
+
+ echo 1 >/sys/class/net/<bridge>/bridge/vlan_filtering
+
+Notification of Learned/Forgotten Source MAC/VLANs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The switch device will learn/forget source MAC address/VLAN on ingress packets
+and notify the switch driver of the mac/vlan/port tuples. The switch driver,
+in turn, will notify the bridge driver using the switchdev notifier call:
+
+ err = call_switchdev_notifiers(val, dev, info);
+
+Where val is SWITCHDEV_FDB_ADD when learning and SWITCHDEV_FDB_DEL when
+forgetting, and info points to a struct switchdev_notifier_fdb_info. On
+SWITCHDEV_FDB_ADD, the bridge driver will install the FDB entry into the
+bridge's FDB and mark the entry as NTF_EXT_LEARNED. The iproute2 bridge
+command will label these entries "offload":
+
+ $ bridge fdb
+ 52:54:00:12:35:01 dev sw1p1 master br0 permanent
+ 00:02:00:00:02:00 dev sw1p1 master br0 offload
+ 00:02:00:00:02:00 dev sw1p1 self
+ 52:54:00:12:35:02 dev sw1p2 master br0 permanent
+ 00:02:00:00:03:00 dev sw1p2 master br0 offload
+ 00:02:00:00:03:00 dev sw1p2 self
+ 33:33:00:00:00:01 dev eth0 self permanent
+ 01:00:5e:00:00:01 dev eth0 self permanent
+ 33:33:ff:00:00:00 dev eth0 self permanent
+ 01:80:c2:00:00:0e dev eth0 self permanent
+ 33:33:00:00:00:01 dev br0 self permanent
+ 01:00:5e:00:00:01 dev br0 self permanent
+ 33:33:ff:12:35:01 dev br0 self permanent
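+
+On the driver side, a sketch of raising this notification when the hardware
+reports a learn event (foo_port_fdb_learn is hypothetical; drivers typically
+defer this to process context, e.g. a work queue, as rocker does):
+
+	static void foo_port_fdb_learn(struct net_device *dev,
+				       const unsigned char *addr, u16 vid)
+	{
+		struct switchdev_notifier_fdb_info info;
+
+		info.addr = addr;
+		info.vid = vid;
+
+		call_switchdev_notifiers(SWITCHDEV_FDB_ADD, dev, &info.info);
+	}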
+
+Learning on the port should be disabled on the bridge using the bridge command:
+
+ bridge link set dev DEV learning off
+
+Learning on the device port should be enabled, as well as learning_sync:
+
+ bridge link set dev DEV learning on self
+ bridge link set dev DEV learning_sync on self
+
+The learning_sync attribute enables syncing of the learned/forgotten FDB entry to
+the bridge's FDB. It's possible, but not optimal, to enable learning on the
+device port and on the bridge port, and disable learning_sync.
+
+To support learning and learning_sync port attributes, the driver implements
+switchdev op switchdev_port_attr_get/set for SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS.
+The driver should initialize the attributes to the hardware defaults.
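+
+A sketch of the set side for this attribute (hypothetical "foo" driver;
+switchdev transaction prepare/commit handling is omitted):
+
+	static int foo_port_attr_set(struct net_device *dev,
+				     struct switchdev_attr *attr)
+	{
+		struct foo_port *foo_port = netdev_priv(dev);
+
+		switch (attr->id) {
+		case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+			foo_port->brport_flags = attr->u.brport_flags;
+			/* program hardware learning based on BR_LEARNING */
+			return 0;
+		default:
+			return -EOPNOTSUPP;
+		}
+	}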
+
+FDB Ageing
+^^^^^^^^^^
+
+There are two FDB ageing models supported: 1) ageing by the device, and 2)
+ageing by the kernel. Ageing by the device is preferred if many FDB entries
+are supported. The driver calls call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+...) to age out the FDB entry. In this model, ageing by the kernel should be
+turned off. XXX: how to turn off ageing in kernel on a per-port basis or
+otherwise prevent the kernel from ageing out the FDB entry?
+
+In the kernel ageing model, the standard bridge ageing mechanism is used to age
+out stale FDB entries. To keep an FDB entry "alive", the driver should refresh
+the FDB entry by calling call_switchdev_notifiers(SWITCHDEV_FDB_ADD, ...). The
+notification will reset the FDB entry's last-used time to now. The driver
+should rate limit refresh notifications, for example, no more than once a
+second. If the FDB entry expires, ndo_fdb_del is called to remove entry from
+the device. XXX: this last part isn't currently correct: ndo_fdb_del isn't
+called, so the stale entry remains in the device...this needs to get fixed.
+
+FDB Flush
+^^^^^^^^^
+
+XXX: Unimplemented. Need to support FDB flush by bridge driver for port and
+remove both static and learned FDB entries.
+
+STP State Change on Port
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Internally or with a third-party STP protocol implementation (e.g. mstpd), the
+bridge driver maintains the STP state for ports, and will notify the switch
+driver of STP state change on a port using the switchdev op
+switchdev_port_attr_set for SWITCHDEV_ATTR_PORT_STP_UPDATE.
+
+State is one of BR_STATE_*. The switch driver can use STP state updates to
+update ingress packet filter list for the port. For example, if port is
+DISABLED, no packets should pass, but if port moves to BLOCKED, then STP BPDUs
+and other IEEE 01:80:c2:xx:xx:xx link-local multicast packets can pass.
+
+Note that STP BPDUs are untagged and STP state applies to all VLANs on the port
+so packet filters should be applied consistently across untagged and tagged
+VLANs on the port.
+
+Flooding L2 domain
+^^^^^^^^^^^^^^^^^^
+
+For a given L2 VLAN domain, the switch device should flood multicast/broadcast
+and unknown unicast packets to all ports in domain, if allowed by port's
+current STP state. The switch driver, knowing which ports are within which
+vlan L2 domain, can program the switch device for flooding. The packet should
+also be sent to the port netdev for processing by the bridge driver. The
+bridge should not reflood the packet to the same ports the device flooded.
+XXX: the mechanism to avoid duplicate flood packets is being discussed.
+
+It is possible for the switch device to not handle flooding and push the
+packets up to the bridge driver for flooding. This is not ideal as the number
+of ports scales in the L2 domain, since the device is much more efficient at
+flooding packets than software.
+
+IGMP Snooping
+^^^^^^^^^^^^^
+
+XXX: complete this section
+
+
+L3 Routing Offload
+------------------
+
+Offloading L3 routing requires that device be programmed with FIB entries from
+the kernel, with the device doing the FIB lookup and forwarding. The device
+does a longest prefix match (LPM) on FIB entries matching route prefix and
+forwards the packet to the matching FIB entry's nexthop(s) egress ports.
+
+To program the device, the driver implements support for
+SWITCHDEV_OBJ_IPV[4|6]_FIB object using switchdev_port_obj_xxx ops.
+switchdev_port_obj_add is used both for adding a new FIB entry to the device
+and for modifying an existing entry on the device.
+
+XXX: Currently, only SWITCHDEV_OBJ_IPV4_FIB objects are supported.
+
+SWITCHDEV_OBJ_IPV4_FIB object passes:
+
+ struct switchdev_obj_ipv4_fib { /* IPV4_FIB */
+ u32 dst;
+ int dst_len;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+ u32 nlflags;
+ u32 tb_id;
+ } ipv4_fib;
+
+to add/modify/delete an IPv4 dst/dst_len prefix on table tb_id. The *fi
+structure holds details on the route and the route's nexthops. *dev is one of
+the port netdevs mentioned in the route's nexthop list. If the output port netdevs
+referenced in the route's nexthop list don't all have the same switch ID, the
+driver is not called to add/modify/delete the FIB entry.
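+
+A sketch of the add path for this object (foo_fib4_add is a hypothetical
+helper that programs the device's LPM table; transaction phases and error
+unwinding are omitted):
+
+	static int foo_port_obj_add(struct net_device *dev,
+				    struct switchdev_obj *obj)
+	{
+		struct switchdev_obj_ipv4_fib *fib4 = &obj->u.ipv4_fib;
+
+		switch (obj->id) {
+		case SWITCHDEV_OBJ_IPV4_FIB:
+			return foo_fib4_add(netdev_priv(dev), fib4->dst,
+					    fib4->dst_len, fib4->fi,
+					    fib4->tb_id);
+		default:
+			return -EOPNOTSUPP;
+		}
+	}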
+
+Routes offloaded to the device are labeled with "offload" in the ip route
+listing:
+
+ $ ip route show
+ default via 192.168.0.2 dev eth0
+ 11.0.0.0/30 dev sw1p1 proto kernel scope link src 11.0.0.2 offload
+ 11.0.0.4/30 via 11.0.0.1 dev sw1p1 proto zebra metric 20 offload
+ 11.0.0.8/30 dev sw1p2 proto kernel scope link src 11.0.0.10 offload
+ 11.0.0.12/30 via 11.0.0.9 dev sw1p2 proto zebra metric 20 offload
+ 12.0.0.2 proto zebra metric 30 offload
+ nexthop via 11.0.0.1 dev sw1p1 weight 1
+ nexthop via 11.0.0.9 dev sw1p2 weight 1
+ 12.0.0.3 via 11.0.0.1 dev sw1p1 proto zebra metric 20 offload
+ 12.0.0.4 via 11.0.0.9 dev sw1p2 proto zebra metric 20 offload
+ 192.168.0.0/24 dev eth0 proto kernel scope link src 192.168.0.15
+
+XXX: add/mod/del IPv6 FIB API
+
+Nexthop Resolution
+^^^^^^^^^^^^^^^^^^
+
+The FIB entry's nexthop list contains the nexthop tuple (gateway, dev), but for
+the switch device to forward the packet with the correct dst mac address, the
+nexthop gateways must be resolved to the neighbor's mac address. Neighbor mac
+address discovery comes via the ARP (or ND) process and is available via the
+arp_tbl neighbor table. To resolve the route's nexthop gateways, the driver
+should trigger the kernel's neighbor resolution process. See the rocker
+driver's rocker_port_ipv4_resolve() for an example.
+
+The driver can monitor for updates to arp_tbl using the netevent notifier
+NETEVENT_NEIGH_UPDATE. The device can be programmed with resolved nexthops
+for the routes as arp_tbl updates.
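+
+A sketch of such a notifier (foo_neigh_update is a hypothetical helper that
+would re-program offloaded nexthops with the neighbor's new state; the
+notifier runs in atomic context, so the device update must not sleep):
+
+	static int foo_netevent_event(struct notifier_block *unused,
+				      unsigned long event, void *ptr)
+	{
+		struct neighbour *n = ptr;
+
+		if (event == NETEVENT_NEIGH_UPDATE && n->tbl == &arp_tbl)
+			foo_neigh_update(n);
+
+		return NOTIFY_DONE;
+	}
+
+	static struct notifier_block foo_netevent_nb __read_mostly = {
+		.notifier_call = foo_netevent_event,
+	};
+
+The block would be registered with register_netevent_notifier() at driver
+init time.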
diff --git a/Documentation/networking/tc-actions-env-rules.txt b/Documentation/networking/tc-actions-env-rules.txt
index 70d6cf608251..f37814693ad3 100644
--- a/Documentation/networking/tc-actions-env-rules.txt
+++ b/Documentation/networking/tc-actions-env-rules.txt
@@ -8,14 +8,8 @@ For example if your action queues a packet to be processed later,
or intentionally branches by redirecting a packet, then you need to
clone the packet.
-There are certain fields in the skb tc_verd that need to be reset so we
-avoid loops, etc. A few are generic enough that skb_act_clone()
-resets them for you, so invoke skb_act_clone() rather than skb_clone().
-
2) If you munge any packet thou shalt call pskb_expand_head in the case
someone else is referencing the skb. After that you "own" the skb.
-You must also tell us if it is ok to munge the packet (TC_OK2MUNGE),
-this way any action downstream can stomp on the packet.
3) Dropping packets you don't own is a no-no. You simply return
TC_ACT_SHOT to the caller and they will drop it.
diff --git a/Documentation/s390/qeth.txt b/Documentation/s390/qeth.txt
index 74122ada9949..aa06fcf5f8c2 100644
--- a/Documentation/s390/qeth.txt
+++ b/Documentation/s390/qeth.txt
@@ -1,6 +1,6 @@
IBM s390 QDIO Ethernet Driver
-HiperSockets Bridge Port Support
+OSA and HiperSockets Bridge Port Support
Uevents
@@ -8,7 +8,7 @@ To generate the events the device must be assigned a role of either
a primary or a secondary Bridge Port. For more information, see
"z/VM Connectivity, SC24-6174".
-When run on HiperSockets Bridge Capable Port hardware, and the state
+When run on OSA or HiperSockets Bridge Capable Port hardware, and the state
of some configured Bridge Port device on the channel changes, a udev
event with ACTION=CHANGE is emitted on behalf of the corresponding
ccwgroup device. The event has the following attributes:
diff --git a/MAINTAINERS b/MAINTAINERS
index d8afd2953678..a7d9bfe04c6b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -652,7 +652,6 @@ M: Tom Lendacky <thomas.lendacky@amd.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/amd/xgbe/
-F: drivers/net/phy/amd-xgbe-phy.c
AMS (Apple Motion Sensor) DRIVER
M: Michael Hanselmann <linux-kernel@hansmi.ch>
@@ -922,6 +921,13 @@ M: Krzysztof Halasa <khalasa@piap.pl>
S: Maintained
F: arch/arm/mach-cns3xxx/
+ARM/CAVIUM THUNDER NETWORK DRIVER
+M: Sunil Goutham <sgoutham@cavium.com>
+M: Robert Richter <rric@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Supported
+F: drivers/net/ethernet/cavium/
+
ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
M: Alexander Shiyan <shc_work@mail.ru>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2436,6 +2442,17 @@ S: Maintained
F: drivers/iio/light/cm*
F: Documentation/devicetree/bindings/i2c/trivial-devices.txt
+CAVIUM LIQUIDIO NETWORK DRIVER
+M: Derek Chickles <derek.chickles@caviumnetworks.com>
+M: Satanand Burla <satananda.burla@caviumnetworks.com>
+M: Felix Manlunas <felix.manlunas@caviumnetworks.com>
+M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
+L: netdev@vger.kernel.org
+W: http://www.cavium.com
+S: Supported
+F: drivers/net/ethernet/cavium/
+F: drivers/net/ethernet/cavium/liquidio/
+
CC2520 IEEE-802.15.4 RADIO DRIVER
M: Varka Bhadram <varkabhadram@gmail.com>
L: linux-wpan@vger.kernel.org
@@ -6366,6 +6383,12 @@ F: include/uapi/linux/meye.h
F: include/uapi/linux/ivtv*
F: include/uapi/linux/uvcvideo.h
+MEDIATEK MT7601U WIRELESS LAN DRIVER
+M: Jakub Kicinski <kubakici@wp.pl>
+L: linux-wireless@vger.kernel.org
+S: Maintained
+F: drivers/net/wireless/mediatek/mt7601u/
+
MEGARAID SCSI/SAS DRIVERS
M: Kashyap Desai <kashyap.desai@avagotech.com>
M: Sumit Saxena <sumit.saxena@avagotech.com>
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index e0e23582c8b4..4550d247e308 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -873,6 +873,16 @@ b_epilogue:
off = offsetof(struct sk_buff, queue_mapping);
emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
break;
+ case BPF_LDX | BPF_W | BPF_ABS:
+ /*
+ * load a 32bit word from struct seccomp_data.
+ * seccomp_check_filter() will already have checked
+ * that k is 32bit aligned and lies within the
+ * struct seccomp_data.
+ */
+ ctx->seen |= SEEN_SKB;
+ emit(ARM_LDR_I(r_A, r_skb, k), ctx);
+ break;
default:
return -1;
}
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index de156ba3bd71..f6498eec9ee1 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -28,6 +28,9 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
* | old backchain | |
* +---------------+ |
* | r15 - r6 | |
+ * +---------------+ |
+ * | 4 byte align | |
+ * | tail_call_cnt | |
* BFP -> +===============+ |
* | | |
* | BPF stack | |
@@ -46,14 +49,17 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
* R15 -> +---------------+ + low
*
* We get 160 bytes stack space from calling function, but only use
- * 11 * 8 byte (old backchain + r15 - r6) for storing registers.
+ * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
*/
#define STK_SPACE (MAX_BPF_STACK + 8 + 4 + 4 + 160)
-#define STK_160_UNUSED (160 - 11 * 8)
+#define STK_160_UNUSED (160 - 12 * 8)
#define STK_OFF (STK_SPACE - STK_160_UNUSED)
#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
+#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
+#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
+
/* Offset to skip condition code check */
#define OFF_OK 4
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 55423d8be580..d3766dd67e23 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -21,6 +21,7 @@
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/init.h>
+#include <linux/bpf.h>
#include <asm/cacheflush.h>
#include <asm/dis.h>
#include "bpf_jit.h"
@@ -40,6 +41,8 @@ struct bpf_jit {
int base_ip; /* Base address for literal pool */
int ret0_ip; /* Address of return 0 */
int exit_ip; /* Address of exit */
+ int tail_call_start; /* Tail call start offset */
+ int labels[1]; /* Labels for local jumps */
};
#define BPF_SIZE_MAX 4096 /* Max size for program */
@@ -49,6 +52,7 @@ struct bpf_jit {
#define SEEN_RET0 4 /* ret0_ip points to a valid return 0 */
#define SEEN_LITERAL 8 /* code uses literals */
#define SEEN_FUNC 16 /* calls C functions */
+#define SEEN_TAIL_CALL 32 /* code uses tail calls */
#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
/*
@@ -60,6 +64,7 @@ struct bpf_jit {
#define REG_L (__MAX_BPF_REG+3) /* Literal pool register */
#define REG_15 (__MAX_BPF_REG+4) /* Register 15 */
#define REG_0 REG_W0 /* Register 0 */
+#define REG_1 REG_W1 /* Register 1 */
#define REG_2 BPF_REG_1 /* Register 2 */
#define REG_14 BPF_REG_0 /* Register 14 */
@@ -223,6 +228,24 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
REG_SET_SEEN(b3); \
})
+#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \
+({ \
+ int rel = (jit->labels[label] - jit->prg) >> 1; \
+ _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), \
+ op2 | mask << 12); \
+ REG_SET_SEEN(b1); \
+ REG_SET_SEEN(b2); \
+})
+
+#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \
+({ \
+ int rel = (jit->labels[label] - jit->prg) >> 1; \
+ _EMIT6(op1 | (reg_high(b1) | mask) << 16 | \
+ (rel & 0xffff), op2 | (imm & 0xff) << 8); \
+ REG_SET_SEEN(b1); \
+ BUILD_BUG_ON(((unsigned long) imm) > 0xff); \
+})
+
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
/* Branch instruction needs 6 bytes */ \
@@ -286,7 +309,7 @@ static void jit_fill_hole(void *area, unsigned int size)
*/
static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
- u32 off = 72 + (rs - 6) * 8;
+ u32 off = STK_OFF_R6 + (rs - 6) * 8;
if (rs == re)
/* stg %rs,off(%r15) */
@@ -301,7 +324,7 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
*/
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
{
- u32 off = 72 + (rs - 6) * 8;
+ u32 off = STK_OFF_R6 + (rs - 6) * 8;
if (jit->seen & SEEN_STACK)
off += STK_OFF;
@@ -374,6 +397,16 @@ static void save_restore_regs(struct bpf_jit *jit, int op)
*/
static void bpf_jit_prologue(struct bpf_jit *jit)
{
+ if (jit->seen & SEEN_TAIL_CALL) {
+ /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
+ _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
+ } else {
+ /* j tail_call_start: NOP if no tail calls are used */
+ EMIT4_PCREL(0xa7f40000, 6);
+ _EMIT2(0);
+ }
+ /* Tail calls have to skip above initialization */
+ jit->tail_call_start = jit->prg;
/* Save registers */
save_restore_regs(jit, REGS_SAVE);
/* Setup literal pool */
@@ -951,6 +984,75 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
EMIT4(0xb9040000, BPF_REG_0, REG_2);
break;
}
+ case BPF_JMP | BPF_CALL | BPF_X:
+ /*
+ * Implicit input:
+ * B1: pointer to ctx
+ * B2: pointer to bpf_array
+ * B3: index in bpf_array
+ */
+ jit->seen |= SEEN_TAIL_CALL;
+
+ /*
+ * if (index >= array->map.max_entries)
+ * goto out;
+ */
+
+ /* llgf %w1,map.max_entries(%b2) */
+ EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
+ offsetof(struct bpf_array, map.max_entries));
+ /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
+ EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+ REG_W1, 0, 0xa);
+
+ /*
+ * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
+ * goto out;
+ */
+
+ if (jit->seen & SEEN_STACK)
+ off = STK_OFF_TCCNT + STK_OFF;
+ else
+ off = STK_OFF_TCCNT;
+ /* lhi %w0,1 */
+ EMIT4_IMM(0xa7080000, REG_W0, 1);
+ /* laal %w1,%w0,off(%r15) */
+ EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
+ /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
+ EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
+ MAX_TAIL_CALL_CNT, 0, 0x2);
+
+ /*
+ * prog = array->prog[index];
+ * if (prog == NULL)
+ * goto out;
+ */
+
+ /* sllg %r1,%b3,3: %r1 = index * 8 */
+ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+ /* lg %r1,prog(%b2,%r1) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
+ REG_1, offsetof(struct bpf_array, prog));
+ /* clgij %r1,0,0x8,label0 */
+ EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
+
+ /*
+ * Restore registers before calling function
+ */
+ save_restore_regs(jit, REGS_RESTORE);
+
+ /*
+ * goto *(prog->bpf_func + tail_call_start);
+ */
+
+ /* lg %r1,bpf_func(%r1) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
+ offsetof(struct bpf_prog, bpf_func));
+ /* bc 0xf,tail_call_start(%r1) */
+ _EMIT4(0x47f01000 + jit->tail_call_start);
+ /* out: */
+ jit->labels[0] = jit->prg;
+ break;
case BPF_JMP | BPF_EXIT: /* return b0 */
last = (i == fp->len - 1) ? 1 : 0;
if (last && !(jit->seen & SEEN_RET0))
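Before the x86 counterpart below, a hedged C sketch of the runtime contract
this s390 sequence unrolls (struct layouts simplified for illustration;
MAX_TAIL_CALL_CNT is 32 in this tree):

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_TAIL_CALL_CNT 32	/* assumption: value in this tree */

	struct bpf_prog { const char *name; };

	struct bpf_array {
		struct { unsigned int max_entries; } map;
		struct bpf_prog *prog[];	/* simplified layout */
	};

	static struct bpf_prog *tail_call_target(struct bpf_array *array,
						 unsigned long index,
						 unsigned int *tail_call_cnt)
	{
		if (index >= array->map.max_entries)
			return NULL;			/* "goto out" */
		if ((*tail_call_cnt)++ > MAX_TAIL_CALL_CNT)
			return NULL;			/* bound the chain */
		return array->prog[index];		/* NULL if slot empty */
	}

	int main(void)
	{
		struct bpf_array *a = calloc(1, sizeof(*a) + sizeof(a->prog[0]));
		struct bpf_prog p = { "prog0" };
		unsigned int cnt = 0;

		a->map.max_entries = 1;
		a->prog[0] = &p;
		printf("%s\n", tail_call_target(a, 0, &cnt)->name);	/* hit */
		printf("%p\n", (void *)tail_call_target(a, 7, &cnt));	/* miss */
		free(a);
		return 0;
	}

On a hit, the JITed code then branches to prog->bpf_func + tail_call_start,
skipping the counter-clearing prologue so the count survives across chained
programs.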
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index ddeff4844a10..579a8fd74be0 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -12,6 +12,7 @@
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
+#include <linux/bpf.h>
int bpf_jit_enable __read_mostly;
@@ -37,7 +38,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
return ptr + len;
}
-#define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
+#define EMIT(bytes, len) \
+ do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
#define EMIT1(b1) EMIT(b1, 1)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
@@ -186,31 +188,31 @@ struct jit_context {
#define BPF_MAX_INSN_SIZE 128
#define BPF_INSN_SAFETY 64
-static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
- int oldproglen, struct jit_context *ctx)
+#define STACKSIZE \
+ (MAX_BPF_STACK + \
+ 32 /* space for rbx, r13, r14, r15 */ + \
+ 8 /* space for skb_copy_bits() buffer */)
+
+#define PROLOGUE_SIZE 51
+
+/* emit x64 prologue code for BPF program and check its size.
+ * bpf_tail_call helper will skip it while jumping into another program
+ */
+static void emit_prologue(u8 **pprog)
{
- struct bpf_insn *insn = bpf_prog->insnsi;
- int insn_cnt = bpf_prog->len;
- bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
- bool seen_exit = false;
- u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
- int i;
- int proglen = 0;
- u8 *prog = temp;
- int stacksize = MAX_BPF_STACK +
- 32 /* space for rbx, r13, r14, r15 */ +
- 8 /* space for skb_copy_bits() buffer */;
+ u8 *prog = *pprog;
+ int cnt = 0;
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
- /* sub rsp, stacksize */
- EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
+ /* sub rsp, STACKSIZE */
+ EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE);
/* all classic BPF filters use R6(rbx) save it */
/* mov qword ptr [rbp-X],rbx */
- EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
+ EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE);
/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
* as temporary, so all tcpdump filters need to spill/fill R7(r13) and
@@ -221,16 +223,112 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
*/
/* mov qword ptr [rbp-X],r13 */
- EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
+ EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8);
/* mov qword ptr [rbp-X],r14 */
- EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
+ EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16);
/* mov qword ptr [rbp-X],r15 */
- EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
+ EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);
/* clear A and X registers */
EMIT2(0x31, 0xc0); /* xor eax, eax */
EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
+ /* clear tail_cnt: mov qword ptr [rbp-X], rax */
+ EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);
+
+ BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
+ *pprog = prog;
+}
+
+/* generate the following code:
+ * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
+ * if (index >= array->map.max_entries)
+ * goto out;
+ * if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
+ * goto out;
+ * prog = array->prog[index];
+ * if (prog == NULL)
+ * goto out;
+ * goto *(prog->bpf_func + prologue_size);
+ * out:
+ */
+static void emit_bpf_tail_call(u8 **pprog)
+{
+ u8 *prog = *pprog;
+ int label1, label2, label3;
+ int cnt = 0;
+
+ /* rdi - pointer to ctx
+ * rsi - pointer to bpf_array
+ * rdx - index in bpf_array
+ */
+
+ /* if (index >= array->map.max_entries)
+ * goto out;
+ */
+ EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
+ offsetof(struct bpf_array, map.max_entries));
+ EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
+#define OFFSET1 44 /* number of bytes to jump */
+ EMIT2(X86_JBE, OFFSET1); /* jbe out */
+ label1 = cnt;
+
+ /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+ * goto out;
+ */
+ EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
+ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
+#define OFFSET2 33
+ EMIT2(X86_JA, OFFSET2); /* ja out */
+ label2 = cnt;
+ EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
+ EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
+
+ /* prog = array->prog[index]; */
+ EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */
+ EMIT1(offsetof(struct bpf_array, prog));
+ EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
+
+ /* if (prog == NULL)
+ * goto out;
+ */
+ EMIT4(0x48, 0x83, 0xF8, 0x00); /* cmp rax, 0 */
+#define OFFSET3 10
+ EMIT2(X86_JE, OFFSET3); /* je out */
+ label3 = cnt;
+
+ /* goto *(prog->bpf_func + prologue_size); */
+ EMIT4(0x48, 0x8B, 0x40, /* mov rax, qword ptr [rax + 32] */
+ offsetof(struct bpf_prog, bpf_func));
+ EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE); /* add rax, prologue_size */
+
+ /* now we're ready to jump into the next BPF program
+ * rdi == ctx (1st arg)
+ * rax == prog->bpf_func + prologue_size
+ */
+ EMIT2(0xFF, 0xE0); /* jmp rax */
+
+ /* out: */
+ BUILD_BUG_ON(cnt - label1 != OFFSET1);
+ BUILD_BUG_ON(cnt - label2 != OFFSET2);
+ BUILD_BUG_ON(cnt - label3 != OFFSET3);
+ *pprog = prog;
+}
+
+static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ int oldproglen, struct jit_context *ctx)
+{
+ struct bpf_insn *insn = bpf_prog->insnsi;
+ int insn_cnt = bpf_prog->len;
+ bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
+ bool seen_exit = false;
+ u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
+ int i, cnt = 0;
+ int proglen = 0;
+ u8 *prog = temp;
+
+ emit_prologue(&prog);
+
if (seen_ld_abs) {
/* r9d : skb->len - skb->data_len (headlen)
* r10 : skb->data
@@ -739,6 +837,10 @@ xadd: if (is_imm8(insn->off))
}
break;
+ case BPF_JMP | BPF_CALL | BPF_X:
+ emit_bpf_tail_call(&prog);
+ break;
+
/* cond jump */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
@@ -891,13 +993,13 @@ common_load:
/* update cleanup_addr */
ctx->cleanup_addr = proglen;
/* mov rbx, qword ptr [rbp-X] */
- EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
+ EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
/* mov r13, qword ptr [rbp-X] */
- EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
+ EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
/* mov r14, qword ptr [rbp-X] */
- EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
+ EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
/* mov r15, qword ptr [rbp-X] */
- EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
+ EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);
EMIT1(0xC9); /* leave */
EMIT1(0xC3); /* ret */
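The three BUILD_BUG_ON()s at "out:" pin these hand-counted sizes; the byte
accounting, reconstructed from the EMIT*() calls above:

	/* PROLOGUE_SIZE = 51 bytes:
	 *   push rbp                          1
	 *   mov  rbp,rsp                      3
	 *   sub  rsp,imm32                    7
	 *   mov  [rbp-X],{rbx,r13,r14,r15}    4 x 7 = 28
	 *   xor  eax,eax                      2
	 *   xor  r13,r13                      3
	 *   mov  [rbp-X],rax (tail_call_cnt)  7
	 *
	 * OFFSET3 = 10: mov rax,[rax+off] (4) + add rax,imm8 (4) + jmp rax (2)
	 * OFFSET2 = 33: add eax,1 (3) + mov [rbp-X],eax (6) + lea (4+1)
	 *               + mov rax,[rax] (3) + cmp rax,0 (4) + je (2) + OFFSET3
	 * OFFSET1 = 44: mov eax,[rbp-X] (6) + cmp eax,imm8 (3) + ja (2) + OFFSET2
	 */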
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index f22cc56fd1b3..5ad0d5354535 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -244,7 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
if (!type)
goto unlock;
- sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto);
+ sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
err = -ENOMEM;
if (!sk2)
goto unlock;
@@ -324,7 +324,7 @@ static int alg_create(struct net *net, struct socket *sock, int protocol,
return -EPROTONOSUPPORT;
err = -ENOMEM;
- sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto);
+ sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto, kern);
if (!sk)
goto out;
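For reference, the sk_alloc() signature these call sites are converted to in
this series (the new trailing argument flags kernel-internal sockets):

	struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
			      struct proto *prot, int kern);

af_alg_accept() passes 0 because the accepted socket is user-visible, while
alg_create() simply forwards the kern flag it was given.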
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 93dca2e73bf5..a8da3a50e374 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -116,8 +116,8 @@ static bool disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
-static bool irq_coalesce = 1;
-static bool sdh = 0;
+static bool irq_coalesce = true;
+static bool sdh;
/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index 909c95bd7be2..feb023d7eebd 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -306,14 +306,12 @@ static int idt77105_start(struct atm_dev *dev)
if (start_timer) {
start_timer = 0;
- init_timer(&stats_timer);
+ setup_timer(&stats_timer, idt77105_stats_timer_func, 0UL);
stats_timer.expires = jiffies+IDT77105_STATS_TIMER_PERIOD;
- stats_timer.function = idt77105_stats_timer_func;
add_timer(&stats_timer);
- init_timer(&restart_timer);
+ setup_timer(&restart_timer, idt77105_restart_timer_func, 0UL);
restart_timer.expires = jiffies+IDT77105_RESTART_TIMER_PERIOD;
- restart_timer.function = idt77105_restart_timer_func;
add_timer(&restart_timer);
}
spin_unlock_irqrestore(&idt77105_priv_lock, flags);
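setup_timer() is the stock shorthand from linux/timer.h; the conversion above
relies on this equivalence (sketch; the callback argument is left unused here):

	setup_timer(&stats_timer, idt77105_stats_timer_func, 0UL);

	/* expands to the open-coded form it replaces: */
	init_timer(&stats_timer);
	stats_timer.function = idt77105_stats_timer_func;
	stats_timer.data = 0UL;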
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 924f8e26789d..65e65903faa0 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2618,7 +2618,7 @@ static void ia_close(struct atm_vcc *vcc)
if (vcc->qos.txtp.traffic_class != ATM_NONE) {
iadev->close_pending++;
prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
- schedule_timeout(50);
+ schedule_timeout(msecs_to_jiffies(500));
finish_wait(&iadev->timeout_wait, &wait);
spin_lock_irqsave(&iadev->tx_lock, flags);
while((skb = skb_dequeue(&iadev->tx_backlog))) {
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 74ccb02e0f10..5f6018e7cd4c 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -226,6 +226,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->of_node = cc->core->dev.of_node;
#endif
switch (bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM53572:
chip->ngpio = 32;
@@ -235,16 +236,17 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
}
/*
- * On MIPS we register GPIO devices (LEDs, buttons) using absolute GPIO
- * pin numbers. We don't have Device Tree there and we can't really use
- * relative (per chip) numbers.
- * So let's use predictable base for BCM47XX and "random" for all other.
+ * Register SoC GPIO devices with absolute GPIO pin base.
+ * On MIPS, we don't have Device Tree and we can't use relative (per chip)
+ * GPIO numbers.
+ * On some ARM devices, user space may want to access some system GPIO
+ * pins directly, which is easier to do with a predictable GPIO base.
*/
-#if IS_BUILTIN(CONFIG_BCM47XX)
- chip->base = bus->num * BCMA_GPIO_MAX_PINS;
-#else
- chip->base = -1;
-#endif
+ if (IS_BUILTIN(CONFIG_BCM47XX) ||
+ cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
+ chip->base = bus->num * BCMA_GPIO_MAX_PINS;
+ else
+ chip->base = -1;
err = bcma_gpio_irq_domain_init(cc);
if (err)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index cee20354ac37..c097909c589c 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -598,7 +598,7 @@ static struct socket *drbd_try_connect(struct drbd_connection *connection)
memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
+ err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
if (err < 0) {
sock = NULL;
@@ -693,7 +693,7 @@ static int prepare_listen_socket(struct drbd_connection *connection, struct acce
memcpy(&my_addr, &connection->my_addr, my_addr_len);
what = "sock_create_kern";
- err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
+ err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
SOCK_STREAM, IPPROTO_TCP, &s_listen);
if (err) {
s_listen = NULL;
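As with sk_alloc() above, sock_create_kern() now takes the network namespace
explicitly; the signature these call sites use (DRBD pins init_net until it
grows namespace support):

	int sock_create_kern(struct net *net, int family, int type,
			     int protocol, struct socket **res);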
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index ed5c2738bea2..2e777071e1dc 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -9,6 +9,10 @@ config BT_BCM
tristate
select FW_LOADER
+config BT_RTL
+ tristate
+ select FW_LOADER
+
config BT_HCIBTUSB
tristate "HCI USB driver"
depends on USB
@@ -32,6 +36,17 @@ config BT_HCIBTUSB_BCM
Say Y here to compile support for Broadcom protocol.
+config BT_HCIBTUSB_RTL
+ bool "Realtek protocol support"
+ depends on BT_HCIBTUSB
+ select BT_RTL
+ default y
+ help
+ The Realtek protocol support enables firmware and configuration
+ download support for Realtek Bluetooth controllers.
+
+ Say Y here to compile support for Realtek protocol.
+
config BT_HCIBTSDIO
tristate "HCI SDIO driver"
depends on MMC
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index dd0d9c40b999..f40e194e7080 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
obj-$(CONFIG_BT_BCM) += btbcm.o
+obj-$(CONFIG_BT_RTL) += btrtl.o
btmrvl-y := btmrvl_main.o
btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 4bba86677adc..728fce38a5a2 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -55,12 +55,6 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
}
bda = (struct hci_rp_read_bd_addr *)skb->data;
- if (bda->status) {
- BT_ERR("%s: BCM: Device address result failed (%02x)",
- hdev->name, bda->status);
- kfree_skb(skb);
- return -bt_to_errno(bda->status);
- }
/* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller
* with no configured address.
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 2d43d4279b00..828f2f8d1568 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -53,12 +53,6 @@ int btintel_check_bdaddr(struct hci_dev *hdev)
}
bda = (struct hci_rp_read_bd_addr *)skb->data;
- if (bda->status) {
- BT_ERR("%s: Intel device address result failed (%02x)",
- hdev->name, bda->status);
- kfree_skb(skb);
- return -bt_to_errno(bda->status);
- }
/* For some Intel based controllers, the default Bluetooth device
* address 00:03:19:9E:8B:00 can be found. These controllers are
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 01d6da577eeb..b9a811900f6a 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1217,7 +1217,7 @@ static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv)
unsigned int reg, reg_start, reg_end;
enum rdwr_status stat;
u8 *dbg_ptr, *end_ptr, *fw_dump_data, *fw_dump_ptr;
- u8 dump_num, idx, i, read_reg, doneflag = 0;
+ u8 dump_num = 0, idx, i, read_reg, doneflag = 0;
u32 memory_size, fw_dump_len = 0;
/* dump sdio register first */
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
new file mode 100644
index 000000000000..84288938f7f2
--- /dev/null
+++ b/drivers/bluetooth/btrtl.c
@@ -0,0 +1,390 @@
+/*
+ * Bluetooth support for Realtek devices
+ *
+ * Copyright (C) 2015 Endless Mobile, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/usb.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btrtl.h"
+
+#define VERSION "0.1"
+
+#define RTL_EPATCH_SIGNATURE "Realtech"
+#define RTL_ROM_LMP_3499 0x3499
+#define RTL_ROM_LMP_8723A 0x1200
+#define RTL_ROM_LMP_8723B 0x8723
+#define RTL_ROM_LMP_8821A 0x8821
+#define RTL_ROM_LMP_8761A 0x8761
+
+static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
+{
+ struct rtl_rom_version_evt *rom_version;
+ struct sk_buff *skb;
+
+ /* Read RTL ROM version command */
+ skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: Read ROM version failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len != sizeof(*rom_version)) {
+ BT_ERR("%s: RTL version event length mismatch", hdev->name);
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ rom_version = (struct rtl_rom_version_evt *)skb->data;
+ BT_INFO("%s: rom_version status=%x version=%x",
+ hdev->name, rom_version->status, rom_version->version);
+
+ *version = rom_version->version;
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
+ const struct firmware *fw,
+ unsigned char **_buf)
+{
+ const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
+ struct rtl_epatch_header *epatch_info;
+ unsigned char *buf;
+ int i, ret, len;
+ size_t min_size;
+ u8 opcode, length, data, rom_version = 0;
+ int project_id = -1;
+ const unsigned char *fwptr, *chip_id_base;
+ const unsigned char *patch_length_base, *patch_offset_base;
+ u32 patch_offset = 0;
+ u16 patch_length, num_patches;
+ const u16 project_id_to_lmp_subver[] = {
+ RTL_ROM_LMP_8723A,
+ RTL_ROM_LMP_8723B,
+ RTL_ROM_LMP_8821A,
+ RTL_ROM_LMP_8761A
+ };
+
+ ret = rtl_read_rom_version(hdev, &rom_version);
+ if (ret)
+ return ret;
+
+ min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ fwptr = fw->data + fw->size - sizeof(extension_sig);
+ if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
+ BT_ERR("%s: extension section signature mismatch", hdev->name);
+ return -EINVAL;
+ }
+
+ /* Loop from the end of the firmware parsing instructions, until
+ * we find an instruction that identifies the "project ID" for the
+ * hardware supported by this firmware file.
+ * Once we have that, we double-check that the project_id is suitable
+ * for the hardware we are working with.
+ */
+ while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
+ opcode = *--fwptr;
+ length = *--fwptr;
+ data = *--fwptr;
+
+ BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
+
+ if (opcode == 0xff) /* EOF */
+ break;
+
+ if (length == 0) {
+ BT_ERR("%s: found instruction with length 0",
+ hdev->name);
+ return -EINVAL;
+ }
+
+ if (opcode == 0 && length == 1) {
+ project_id = data;
+ break;
+ }
+
+ fwptr -= length;
+ }
+
+ if (project_id < 0) {
+ BT_ERR("%s: failed to find version instruction", hdev->name);
+ return -EINVAL;
+ }
+
+ if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
+ BT_ERR("%s: unknown project id %d", hdev->name, project_id);
+ return -EINVAL;
+ }
+
+ if (lmp_subver != project_id_to_lmp_subver[project_id]) {
+ BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
+ project_id_to_lmp_subver[project_id], lmp_subver);
+ return -EINVAL;
+ }
+
+ epatch_info = (struct rtl_epatch_header *)fw->data;
+ if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
+ BT_ERR("%s: bad EPATCH signature", hdev->name);
+ return -EINVAL;
+ }
+
+ num_patches = le16_to_cpu(epatch_info->num_patches);
+ BT_DBG("fw_version=%x, num_patches=%d",
+ le32_to_cpu(epatch_info->fw_version), num_patches);
+
+ /* After the rtl_epatch_header there is a funky patch metadata section.
+ * Assuming 2 patches, the layout is:
+ * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
+ *
+ * Find the right patch for this chip.
+ */
+ min_size += 8 * num_patches;
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
+ patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
+ patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
+ for (i = 0; i < num_patches; i++) {
+ u16 chip_id = get_unaligned_le16(chip_id_base +
+ (i * sizeof(u16)));
+ if (chip_id == rom_version + 1) {
+ patch_length = get_unaligned_le16(patch_length_base +
+ (i * sizeof(u16)));
+ patch_offset = get_unaligned_le32(patch_offset_base +
+ (i * sizeof(u32)));
+ break;
+ }
+ }
+
+ if (!patch_offset) {
+ BT_ERR("%s: didn't find patch for chip id %d",
+ hdev->name, rom_version);
+ return -EINVAL;
+ }
+
+ BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
+ min_size = patch_offset + patch_length;
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ /* Copy the firmware into a new buffer and write the version at
+ * the end.
+ */
+ len = patch_length;
+ buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
+
+ *_buf = buf;
+ return len;
+}
+
+static int rtl_download_firmware(struct hci_dev *hdev,
+ const unsigned char *data, int fw_len)
+{
+ struct rtl_download_cmd *dl_cmd;
+ int frag_num = fw_len / RTL_FRAG_LEN + 1;
+ int frag_len = RTL_FRAG_LEN;
+ int ret = 0;
+ int i;
+
+ dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
+ if (!dl_cmd)
+ return -ENOMEM;
+
+ for (i = 0; i < frag_num; i++) {
+ struct sk_buff *skb;
+
+ BT_DBG("download fw (%d/%d)", i, frag_num);
+
+ dl_cmd->index = i;
+ if (i == (frag_num - 1)) {
+ dl_cmd->index |= 0x80; /* data end */
+ frag_len = fw_len % RTL_FRAG_LEN;
+ }
+ memcpy(dl_cmd->data, data, frag_len);
+
+ /* Send download command */
+ skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: download fw command failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ ret = -PTR_ERR(skb);
+ goto out;
+ }
+
+ if (skb->len != sizeof(struct rtl_download_response)) {
+ BT_ERR("%s: download fw event length mismatch",
+ hdev->name);
+ kfree_skb(skb);
+ ret = -EIO;
+ goto out;
+ }
+
+ kfree_skb(skb);
+ data += RTL_FRAG_LEN;
+ }
+
+out:
+ kfree(dl_cmd);
+ return ret;
+}
+
+static int btrtl_setup_rtl8723a(struct hci_dev *hdev)
+{
+ const struct firmware *fw;
+ int ret;
+
+ BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
+ ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev);
+ if (ret < 0) {
+ BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
+ return ret;
+ }
+
+ if (fw->size < 8) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Check that the firmware doesn't have the epatch signature
+ * (which is only for RTL8723B and newer).
+ */
+ if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
+ BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = rtl_download_firmware(hdev, fw->data, fw->size);
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
+ const char *fw_name)
+{
+ unsigned char *fw_data = NULL;
+ const struct firmware *fw;
+ int ret;
+
+ BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
+ ret = request_firmware(&fw, fw_name, &hdev->dev);
+ if (ret < 0) {
+ BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
+ return ret;
+ }
+
+ ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
+ if (ret < 0)
+ goto out;
+
+ ret = rtl_download_firmware(hdev, fw_data, ret);
+ kfree(fw_data);
+ if (ret < 0)
+ goto out;
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
+ hdev->name, PTR_ERR(skb));
+ return skb;
+ }
+
+ if (skb->len != sizeof(struct hci_rp_read_local_version)) {
+ BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
+ hdev->name);
+ kfree_skb(skb);
+ return ERR_PTR(-EIO);
+ }
+
+ return skb;
+}
+
+int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ struct hci_rp_read_local_version *resp;
+ u16 lmp_subver;
+
+ skb = btrtl_read_local_version(hdev);
+ if (IS_ERR(skb))
+ return -PTR_ERR(skb);
+
+ resp = (struct hci_rp_read_local_version *)skb->data;
+ BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
+ "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
+ resp->lmp_ver, resp->lmp_subver);
+
+ lmp_subver = le16_to_cpu(resp->lmp_subver);
+ kfree_skb(skb);
+
+ /* Match a set of subver values that correspond to stock firmware,
+ * which is not compatible with standard btusb.
+ * If matched, upload an alternative firmware that does conform to
+ * standard btusb. Once that firmware is uploaded, the subver changes
+ * to a different value.
+ */
+ switch (lmp_subver) {
+ case RTL_ROM_LMP_8723A:
+ case RTL_ROM_LMP_3499:
+ return btrtl_setup_rtl8723a(hdev);
+ case RTL_ROM_LMP_8723B:
+ return btrtl_setup_rtl8723b(hdev, lmp_subver,
+ "rtl_bt/rtl8723b_fw.bin");
+ case RTL_ROM_LMP_8821A:
+ return btrtl_setup_rtl8723b(hdev, lmp_subver,
+ "rtl_bt/rtl8821a_fw.bin");
+ case RTL_ROM_LMP_8761A:
+ return btrtl_setup_rtl8723b(hdev, lmp_subver,
+ "rtl_bt/rtl8761a_fw.bin");
+ default:
+ BT_INFO("rtl: assuming no firmware upload needed.");
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
+
+MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>");
+MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
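The download loop above splits the image into 252-byte fragments and tags the
last one with 0x80; a standalone sketch of that math (the 1000-byte size is
illustrative only):

	#include <stdio.h>

	#define RTL_FRAG_LEN 252

	int main(void)
	{
		int fw_len = 1000;				/* illustrative */
		int frag_num = fw_len / RTL_FRAG_LEN + 1;	/* 4 fragments */
		int i;

		for (i = 0; i < frag_num; i++) {
			int index = i, frag_len = RTL_FRAG_LEN;

			if (i == frag_num - 1) {
				index |= 0x80;			/* data end */
				frag_len = fw_len % RTL_FRAG_LEN;	/* 244 */
			}
			printf("frag %d: index 0x%02x, %d bytes\n",
			       i, index, frag_len);
		}
		return 0;
	}

Note that an image whose size is an exact multiple of RTL_FRAG_LEN yields an
empty final fragment; presumably the device-side protocol tolerates that.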
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
new file mode 100644
index 000000000000..38ffe4890cd1
--- /dev/null
+++ b/drivers/bluetooth/btrtl.h
@@ -0,0 +1,52 @@
+/*
+ * Bluetooth support for Realtek devices
+ *
+ * Copyright (C) 2015 Endless Mobile, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define RTL_FRAG_LEN 252
+
+struct rtl_download_cmd {
+ __u8 index;
+ __u8 data[RTL_FRAG_LEN];
+} __packed;
+
+struct rtl_download_response {
+ __u8 status;
+ __u8 index;
+} __packed;
+
+struct rtl_rom_version_evt {
+ __u8 status;
+ __u8 version;
+} __packed;
+
+struct rtl_epatch_header {
+ __u8 signature[8];
+ __le32 fw_version;
+ __le16 num_patches;
+} __packed;
+
+#if IS_ENABLED(CONFIG_BT_RTL)
+
+int btrtl_setup_realtek(struct hci_dev *hdev);
+
+#else
+
+static inline int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
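The IS_ENABLED() stub keeps callers link-clean whether or not BT_RTL is
selected; a hypothetical caller (function name is illustrative, not from this
series):

	static int example_setup(struct hci_dev *hdev)
	{
		int err = btrtl_setup_realtek(hdev);

		/* Without CONFIG_BT_RTL the inline stub returns -EOPNOTSUPP
		 * and the compiler can fold this branch away entirely.
		 */
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}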
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3c10d4dfe9a7..94c6c048130f 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -31,13 +31,14 @@
#include "btintel.h"
#include "btbcm.h"
+#include "btrtl.h"
#define VERSION "0.8"
static bool disable_scofix;
static bool force_scofix;
-static bool reset = 1;
+static bool reset = true;
static struct usb_driver btusb_driver;
@@ -330,6 +331,7 @@ static const struct usb_device_id blacklist_table[] = {
#define BTUSB_FIRMWARE_LOADED 7
#define BTUSB_FIRMWARE_FAILED 8
#define BTUSB_BOOTING 9
+#define BTUSB_RESET_RESUME 10
struct btusb_data {
struct hci_dev *hdev;
@@ -1372,378 +1374,6 @@ static int btusb_setup_csr(struct hci_dev *hdev)
return ret;
}
-#define RTL_FRAG_LEN 252
-
-struct rtl_download_cmd {
- __u8 index;
- __u8 data[RTL_FRAG_LEN];
-} __packed;
-
-struct rtl_download_response {
- __u8 status;
- __u8 index;
-} __packed;
-
-struct rtl_rom_version_evt {
- __u8 status;
- __u8 version;
-} __packed;
-
-struct rtl_epatch_header {
- __u8 signature[8];
- __le32 fw_version;
- __le16 num_patches;
-} __packed;
-
-#define RTL_EPATCH_SIGNATURE "Realtech"
-#define RTL_ROM_LMP_3499 0x3499
-#define RTL_ROM_LMP_8723A 0x1200
-#define RTL_ROM_LMP_8723B 0x8723
-#define RTL_ROM_LMP_8821A 0x8821
-#define RTL_ROM_LMP_8761A 0x8761
-
-static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
-{
- struct rtl_rom_version_evt *rom_version;
- struct sk_buff *skb;
- int ret;
-
- /* Read RTL ROM version command */
- skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- BT_ERR("%s: Read ROM version failed (%ld)",
- hdev->name, PTR_ERR(skb));
- return PTR_ERR(skb);
- }
-
- if (skb->len != sizeof(*rom_version)) {
- BT_ERR("%s: RTL version event length mismatch", hdev->name);
- kfree_skb(skb);
- return -EIO;
- }
-
- rom_version = (struct rtl_rom_version_evt *)skb->data;
- BT_INFO("%s: rom_version status=%x version=%x",
- hdev->name, rom_version->status, rom_version->version);
-
- ret = rom_version->status;
- if (ret == 0)
- *version = rom_version->version;
-
- kfree_skb(skb);
- return ret;
-}
-
-static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
- const struct firmware *fw,
- unsigned char **_buf)
-{
- const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
- struct rtl_epatch_header *epatch_info;
- unsigned char *buf;
- int i, ret, len;
- size_t min_size;
- u8 opcode, length, data, rom_version = 0;
- int project_id = -1;
- const unsigned char *fwptr, *chip_id_base;
- const unsigned char *patch_length_base, *patch_offset_base;
- u32 patch_offset = 0;
- u16 patch_length, num_patches;
- const u16 project_id_to_lmp_subver[] = {
- RTL_ROM_LMP_8723A,
- RTL_ROM_LMP_8723B,
- RTL_ROM_LMP_8821A,
- RTL_ROM_LMP_8761A
- };
-
- ret = rtl_read_rom_version(hdev, &rom_version);
- if (ret)
- return -bt_to_errno(ret);
-
- min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
- if (fw->size < min_size)
- return -EINVAL;
-
- fwptr = fw->data + fw->size - sizeof(extension_sig);
- if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
- BT_ERR("%s: extension section signature mismatch", hdev->name);
- return -EINVAL;
- }
-
- /* Loop from the end of the firmware parsing instructions, until
- * we find an instruction that identifies the "project ID" for the
- * hardware supported by this firwmare file.
- * Once we have that, we double-check that that project_id is suitable
- * for the hardware we are working with.
- */
- while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
- opcode = *--fwptr;
- length = *--fwptr;
- data = *--fwptr;
-
- BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
-
- if (opcode == 0xff) /* EOF */
- break;
-
- if (length == 0) {
- BT_ERR("%s: found instruction with length 0",
- hdev->name);
- return -EINVAL;
- }
-
- if (opcode == 0 && length == 1) {
- project_id = data;
- break;
- }
-
- fwptr -= length;
- }
-
- if (project_id < 0) {
- BT_ERR("%s: failed to find version instruction", hdev->name);
- return -EINVAL;
- }
-
- if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
- BT_ERR("%s: unknown project id %d", hdev->name, project_id);
- return -EINVAL;
- }
-
- if (lmp_subver != project_id_to_lmp_subver[project_id]) {
- BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
- project_id_to_lmp_subver[project_id], lmp_subver);
- return -EINVAL;
- }
-
- epatch_info = (struct rtl_epatch_header *)fw->data;
- if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
- BT_ERR("%s: bad EPATCH signature", hdev->name);
- return -EINVAL;
- }
-
- num_patches = le16_to_cpu(epatch_info->num_patches);
- BT_DBG("fw_version=%x, num_patches=%d",
- le32_to_cpu(epatch_info->fw_version), num_patches);
-
- /* After the rtl_epatch_header there is a funky patch metadata section.
- * Assuming 2 patches, the layout is:
- * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
- *
- * Find the right patch for this chip.
- */
- min_size += 8 * num_patches;
- if (fw->size < min_size)
- return -EINVAL;
-
- chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
- patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
- patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
- for (i = 0; i < num_patches; i++) {
- u16 chip_id = get_unaligned_le16(chip_id_base +
- (i * sizeof(u16)));
- if (chip_id == rom_version + 1) {
- patch_length = get_unaligned_le16(patch_length_base +
- (i * sizeof(u16)));
- patch_offset = get_unaligned_le32(patch_offset_base +
- (i * sizeof(u32)));
- break;
- }
- }
-
- if (!patch_offset) {
- BT_ERR("%s: didn't find patch for chip id %d",
- hdev->name, rom_version);
- return -EINVAL;
- }
-
- BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
- min_size = patch_offset + patch_length;
- if (fw->size < min_size)
- return -EINVAL;
-
- /* Copy the firmware into a new buffer and write the version at
- * the end.
- */
- len = patch_length;
- buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
-
- *_buf = buf;
- return len;
-}
-
-static int rtl_download_firmware(struct hci_dev *hdev,
- const unsigned char *data, int fw_len)
-{
- struct rtl_download_cmd *dl_cmd;
- int frag_num = fw_len / RTL_FRAG_LEN + 1;
- int frag_len = RTL_FRAG_LEN;
- int ret = 0;
- int i;
-
- dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
- if (!dl_cmd)
- return -ENOMEM;
-
- for (i = 0; i < frag_num; i++) {
- struct rtl_download_response *dl_resp;
- struct sk_buff *skb;
-
- BT_DBG("download fw (%d/%d)", i, frag_num);
-
- dl_cmd->index = i;
- if (i == (frag_num - 1)) {
- dl_cmd->index |= 0x80; /* data end */
- frag_len = fw_len % RTL_FRAG_LEN;
- }
- memcpy(dl_cmd->data, data, frag_len);
-
- /* Send download command */
- skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
- HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- BT_ERR("%s: download fw command failed (%ld)",
- hdev->name, PTR_ERR(skb));
- ret = -PTR_ERR(skb);
- goto out;
- }
-
- if (skb->len != sizeof(*dl_resp)) {
- BT_ERR("%s: download fw event length mismatch",
- hdev->name);
- kfree_skb(skb);
- ret = -EIO;
- goto out;
- }
-
- dl_resp = (struct rtl_download_response *)skb->data;
- if (dl_resp->status != 0) {
- kfree_skb(skb);
- ret = bt_to_errno(dl_resp->status);
- goto out;
- }
-
- kfree_skb(skb);
- data += RTL_FRAG_LEN;
- }
-
-out:
- kfree(dl_cmd);
- return ret;
-}
-
-static int btusb_setup_rtl8723a(struct hci_dev *hdev)
-{
- struct btusb_data *data = dev_get_drvdata(&hdev->dev);
- struct usb_device *udev = interface_to_usbdev(data->intf);
- const struct firmware *fw;
- int ret;
-
- BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
- ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &udev->dev);
- if (ret < 0) {
- BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
- return ret;
- }
-
- if (fw->size < 8) {
- ret = -EINVAL;
- goto out;
- }
-
- /* Check that the firmware doesn't have the epatch signature
- * (which is only for RTL8723B and newer).
- */
- if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
- BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
- ret = -EINVAL;
- goto out;
- }
-
- ret = rtl_download_firmware(hdev, fw->data, fw->size);
-
-out:
- release_firmware(fw);
- return ret;
-}
-
-static int btusb_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
- const char *fw_name)
-{
- struct btusb_data *data = dev_get_drvdata(&hdev->dev);
- struct usb_device *udev = interface_to_usbdev(data->intf);
- unsigned char *fw_data = NULL;
- const struct firmware *fw;
- int ret;
-
- BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
- ret = request_firmware(&fw, fw_name, &udev->dev);
- if (ret < 0) {
- BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
- return ret;
- }
-
- ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
- if (ret < 0)
- goto out;
-
- ret = rtl_download_firmware(hdev, fw_data, ret);
- kfree(fw_data);
- if (ret < 0)
- goto out;
-
-out:
- release_firmware(fw);
- return ret;
-}
-
-static int btusb_setup_realtek(struct hci_dev *hdev)
-{
- struct sk_buff *skb;
- struct hci_rp_read_local_version *resp;
- u16 lmp_subver;
-
- skb = btusb_read_local_version(hdev);
- if (IS_ERR(skb))
- return -PTR_ERR(skb);
-
- resp = (struct hci_rp_read_local_version *)skb->data;
- BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
- "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
- resp->lmp_ver, resp->lmp_subver);
-
- lmp_subver = le16_to_cpu(resp->lmp_subver);
- kfree_skb(skb);
-
- /* Match a set of subver values that correspond to stock firmware,
- * which is not compatible with standard btusb.
- * If matched, upload an alternative firmware that does conform to
- * standard btusb. Once that firmware is uploaded, the subver changes
- * to a different value.
- */
- switch (lmp_subver) {
- case RTL_ROM_LMP_8723A:
- case RTL_ROM_LMP_3499:
- return btusb_setup_rtl8723a(hdev);
- case RTL_ROM_LMP_8723B:
- return btusb_setup_rtl8723b(hdev, lmp_subver,
- "rtl_bt/rtl8723b_fw.bin");
- case RTL_ROM_LMP_8821A:
- return btusb_setup_rtl8723b(hdev, lmp_subver,
- "rtl_bt/rtl8821a_fw.bin");
- case RTL_ROM_LMP_8761A:
- return btusb_setup_rtl8723b(hdev, lmp_subver,
- "rtl_bt/rtl8761a_fw.bin");
- default:
- BT_INFO("rtl: assuming no firmware upload needed.");
- return 0;
- }
-}
-
static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
struct intel_version *ver)
{
@@ -1951,12 +1581,6 @@ static int btusb_setup_intel(struct hci_dev *hdev)
}
ver = (struct intel_version *)skb->data;
- if (ver->status) {
- BT_ERR("%s Intel fw version event failed (%02x)", hdev->name,
- ver->status);
- kfree_skb(skb);
- return -bt_to_errno(ver->status);
- }
BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x",
hdev->name, ver->hw_platform, ver->hw_variant,
@@ -2004,15 +1628,6 @@ static int btusb_setup_intel(struct hci_dev *hdev)
return PTR_ERR(skb);
}
- if (skb->data[0]) {
- u8 evt_status = skb->data[0];
-
- BT_ERR("%s enable Intel manufacturer mode event failed (%02x)",
- hdev->name, evt_status);
- kfree_skb(skb);
- release_firmware(fw);
- return -bt_to_errno(evt_status);
- }
kfree_skb(skb);
disable_patch = 1;
@@ -2358,13 +1973,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
}
ver = (struct intel_version *)skb->data;
- if (ver->status) {
- BT_ERR("%s: Intel version command failure (%02x)",
- hdev->name, ver->status);
- err = -bt_to_errno(ver->status);
- kfree_skb(skb);
- return err;
- }
/* The hardware platform number has a fixed value of 0x37 and
* for now only accept this single value.
@@ -2439,13 +2047,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
}
params = (struct intel_boot_params *)skb->data;
- if (params->status) {
- BT_ERR("%s: Intel boot parameters command failure (%02x)",
- hdev->name, params->status);
- err = -bt_to_errno(params->status);
- kfree_skb(skb);
- return err;
- }
BT_INFO("%s: Device revision is %u", hdev->name,
le16_to_cpu(params->dev_revid));
@@ -2678,13 +2279,6 @@ static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
return;
}
- if (skb->data[0] != 0x00) {
- BT_ERR("%s: Exception info command failure (%02x)",
- hdev->name, skb->data[0]);
- kfree_skb(skb);
- return;
- }
-
BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
kfree_skb(skb);
@@ -2792,6 +2386,7 @@ struct qca_device_info {
static const struct qca_device_info qca_devices_table[] = {
{ 0x00000100, 20, 4, 10 }, /* Rome 1.0 */
{ 0x00000101, 20, 4, 10 }, /* Rome 1.1 */
+ { 0x00000200, 28, 4, 18 }, /* Rome 2.0 */
{ 0x00000201, 28, 4, 18 }, /* Rome 2.1 */
{ 0x00000300, 28, 4, 18 }, /* Rome 3.0 */
{ 0x00000302, 28, 4, 18 }, /* Rome 3.2 */
@@ -3175,8 +2770,17 @@ static int btusb_probe(struct usb_interface *intf,
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
}
- if (id->driver_info & BTUSB_REALTEK)
- hdev->setup = btusb_setup_realtek;
+#ifdef CONFIG_BT_HCIBTUSB_RTL
+ if (id->driver_info & BTUSB_REALTEK) {
+ hdev->setup = btrtl_setup_realtek;
+
+ /* Realtek devices lose their updated firmware over suspend,
+ * but the USB hub doesn't notice any status change.
+ * Explicitly request a device reset on resume.
+ */
+ set_bit(BTUSB_RESET_RESUME, &data->flags);
+ }
+#endif
if (id->driver_info & BTUSB_AMP) {
/* AMP controllers do not support SCO packets */
@@ -3308,6 +2912,14 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
+ /* Optionally request a device reset on resume, but only when
+ * wakeups are disabled. If wakeups are enabled we assume the
+ * device will stay powered up throughout suspend.
+ */
+ if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
+ !device_may_wakeup(&data->udev->dev))
+ data->udev->reset_resume = 1;
+
return 0;
}
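For context, a hedged note on what the flag buys (standard usbcore behavior,
not introduced by this patch):

	/* With udev->reset_resume set, usbcore resets the device on resume
	 * and calls the driver's .reset_resume method; a driver without one
	 * is unbound and re-probed, so hdev->setup() runs again and the
	 * RAM-resident Realtek firmware gets re-uploaded.
	 */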
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 55c135b7757a..7a722df97343 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -22,7 +22,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
-#define DEBUG
+
#include <linux/platform_device.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index dc8e3d4356a0..fc0056a28b81 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -47,8 +47,8 @@
#include "hci_uart.h"
-static bool txcrc = 1;
-static bool hciextn = 1;
+static bool txcrc = true;
+static bool hciextn = true;
#define BCSP_TXWINSIZE 4
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index a50936a17376..563969942a1d 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -140,12 +140,47 @@ static struct clk_regmap pll14_vote = {
},
};
+#define NSS_PLL_RATE(f, _l, _m, _n, i) \
+ { \
+ .freq = f, \
+ .l = _l, \
+ .m = _m, \
+ .n = _n, \
+ .ibits = i, \
+ }
+
+static struct pll_freq_tbl pll18_freq_tbl[] = {
+ NSS_PLL_RATE(550000000, 44, 0, 1, 0x01495625),
+ NSS_PLL_RATE(733000000, 58, 16, 25, 0x014b5625),
+};
+
+static struct clk_pll pll18 = {
+ .l_reg = 0x31a4,
+ .m_reg = 0x31a8,
+ .n_reg = 0x31ac,
+ .config_reg = 0x31b4,
+ .mode_reg = 0x31a0,
+ .status_reg = 0x31b8,
+ .status_bit = 16,
+ .post_div_shift = 16,
+ .post_div_width = 1,
+ .freq_tbl = pll18_freq_tbl,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pll18",
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
enum {
P_PXO,
P_PLL8,
P_PLL3,
P_PLL0,
P_CXO,
+ P_PLL14,
+ P_PLL18,
};
static const struct parent_map gcc_pxo_pll8_map[] = {
@@ -197,6 +232,22 @@ static const char *gcc_pxo_pll8_pll0_map[] = {
"pll0_vote",
};
+static const struct parent_map gcc_pxo_pll8_pll14_pll18_pll0_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 4 },
+ { P_PLL0, 2 },
+ { P_PLL14, 5 },
+ { P_PLL18, 1 }
+};
+
+static const char *gcc_pxo_pll8_pll14_pll18_pll0[] = {
+ "pxo",
+ "pll8_vote",
+ "pll0_vote",
+ "pll14",
+ "pll18",
+};
+
static struct freq_tbl clk_tbl_gsbi_uart[] = {
{ 1843200, P_PLL8, 2, 6, 625 },
{ 3686400, P_PLL8, 2, 12, 625 },
@@ -2202,6 +2253,472 @@ static struct clk_branch ebi2_aon_clk = {
},
};
+static const struct freq_tbl clk_tbl_gmac[] = {
+ { 133000000, P_PLL0, 1, 50, 301 },
+ { 266000000, P_PLL0, 1, 127, 382 },
+ { }
+};
+
+static struct clk_dyn_rcg gmac_core1_src = {
+ .ns_reg[0] = 0x3cac,
+ .ns_reg[1] = 0x3cb0,
+ .md_reg[0] = 0x3ca4,
+ .md_reg[1] = 0x3ca8,
+ .bank_reg = 0x3ca0,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .s[0] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .p[0] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .p[1] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .mux_sel_bit = 0,
+ .freq_tbl = clk_tbl_gmac,
+ .clkr = {
+ .enable_reg = 0x3ca0,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gmac_core1_src",
+ .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+ .num_parents = 5,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch gmac_core1_clk = {
+ .halt_reg = 0x3c20,
+ .halt_bit = 4,
+ .hwcg_reg = 0x3cb4,
+ .hwcg_bit = 6,
+ .clkr = {
+ .enable_reg = 0x3cb4,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gmac_core1_clk",
+ .parent_names = (const char *[]){
+ "gmac_core1_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_dyn_rcg gmac_core2_src = {
+ .ns_reg[0] = 0x3ccc,
+ .ns_reg[1] = 0x3cd0,
+ .md_reg[0] = 0x3cc4,
+ .md_reg[1] = 0x3cc8,
+ .bank_reg = 0x3ca0,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .s[0] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .p[0] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .p[1] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .mux_sel_bit = 0,
+ .freq_tbl = clk_tbl_gmac,
+ .clkr = {
+ .enable_reg = 0x3cc0,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gmac_core2_src",
+ .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+ .num_parents = 5,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch gmac_core2_clk = {
+ .halt_reg = 0x3c20,
+ .halt_bit = 5,
+ .hwcg_reg = 0x3cd4,
+ .hwcg_bit = 6,
+ .clkr = {
+ .enable_reg = 0x3cd4,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gmac_core2_clk",
+ .parent_names = (const char *[]){
+ "gmac_core2_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_dyn_rcg gmac_core3_src = {
+ .ns_reg[0] = 0x3cec,
+ .ns_reg[1] = 0x3cf0,
+ .md_reg[0] = 0x3ce4,
+ .md_reg[1] = 0x3ce8,
+ .bank_reg = 0x3ce0,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .s[0] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .p[0] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .p[1] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .mux_sel_bit = 0,
+ .freq_tbl = clk_tbl_gmac,
+ .clkr = {
+ .enable_reg = 0x3ce0,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gmac_core3_src",
+ .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+ .num_parents = 5,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch gmac_core3_clk = {
+ .halt_reg = 0x3c20,
+ .halt_bit = 6,
+ .hwcg_reg = 0x3cf4,
+ .hwcg_bit = 6,
+ .clkr = {
+ .enable_reg = 0x3cf4,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gmac_core3_clk",
+ .parent_names = (const char *[]){
+ "gmac_core3_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_dyn_rcg gmac_core4_src = {
+ .ns_reg[0] = 0x3d0c,
+ .ns_reg[1] = 0x3d10,
+ .md_reg[0] = 0x3d04,
+ .md_reg[1] = 0x3d08,
+ .bank_reg = 0x3d00,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .s[0] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .p[0] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .p[1] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .mux_sel_bit = 0,
+ .freq_tbl = clk_tbl_gmac,
+ .clkr = {
+ .enable_reg = 0x3d00,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gmac_core4_src",
+ .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+ .num_parents = 5,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch gmac_core4_clk = {
+ .halt_reg = 0x3c20,
+ .halt_bit = 7,
+ .hwcg_reg = 0x3d14,
+ .hwcg_bit = 6,
+ .clkr = {
+ .enable_reg = 0x3d14,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gmac_core4_clk",
+ .parent_names = (const char *[]){
+ "gmac_core4_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_nss_tcm[] = {
+ { 266000000, P_PLL0, 3, 0, 0 },
+ { 400000000, P_PLL0, 2, 0, 0 },
+ { }
+};
+
+static struct clk_dyn_rcg nss_tcm_src = {
+ .ns_reg[0] = 0x3dc4,
+ .ns_reg[1] = 0x3dc8,
+ .bank_reg = 0x3dc0,
+ .s[0] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .p[0] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 4,
+ },
+ .p[1] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 4,
+ },
+ .mux_sel_bit = 0,
+ .freq_tbl = clk_tbl_nss_tcm,
+ .clkr = {
+ .enable_reg = 0x3dc0,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_tcm_src",
+ .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+ .num_parents = 5,
+ .ops = &clk_dyn_rcg_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_tcm_clk = {
+ .halt_reg = 0x3c20,
+ .halt_bit = 14,
+ .clkr = {
+ .enable_reg = 0x3dd0,
+ .enable_mask = BIT(6) | BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "nss_tcm_clk",
+ .parent_names = (const char *[]){
+ "nss_tcm_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl clk_tbl_nss[] = {
+ { 110000000, P_PLL18, 1, 1, 5 },
+ { 275000000, P_PLL18, 2, 0, 0 },
+ { 550000000, P_PLL18, 1, 0, 0 },
+ { 733000000, P_PLL18, 1, 0, 0 },
+ { }
+};
+
+static struct clk_dyn_rcg ubi32_core1_src_clk = {
+ .ns_reg[0] = 0x3d2c,
+ .ns_reg[1] = 0x3d30,
+ .md_reg[0] = 0x3d24,
+ .md_reg[1] = 0x3d28,
+ .bank_reg = 0x3d20,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .s[0] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .p[0] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .p[1] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .mux_sel_bit = 0,
+ .freq_tbl = clk_tbl_nss,
+ .clkr = {
+ .enable_reg = 0x3d20,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "ubi32_core1_src_clk",
+ .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+ .num_parents = 5,
+ .ops = &clk_dyn_rcg_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+ },
+};
+
+static struct clk_dyn_rcg ubi32_core2_src_clk = {
+ .ns_reg[0] = 0x3d4c,
+ .ns_reg[1] = 0x3d50,
+ .md_reg[0] = 0x3d44,
+ .md_reg[1] = 0x3d48,
+ .bank_reg = 0x3d40,
+ .mn[0] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .mn[1] = {
+ .mnctr_en_bit = 8,
+ .mnctr_reset_bit = 7,
+ .mnctr_mode_shift = 5,
+ .n_val_shift = 16,
+ .m_val_shift = 16,
+ .width = 8,
+ },
+ .s[0] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .s[1] = {
+ .src_sel_shift = 0,
+ .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+ },
+ .p[0] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .p[1] = {
+ .pre_div_shift = 3,
+ .pre_div_width = 2,
+ },
+ .mux_sel_bit = 0,
+ .freq_tbl = clk_tbl_nss,
+ .clkr = {
+ .enable_reg = 0x3d40,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "ubi32_core2_src_clk",
+ .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+ .num_parents = 5,
+ .ops = &clk_dyn_rcg_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+ },
+};
+
static struct clk_regmap *gcc_ipq806x_clks[] = {
[PLL0] = &pll0.clkr,
[PLL0_VOTE] = &pll0_vote,
@@ -2211,6 +2728,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
[PLL8_VOTE] = &pll8_vote,
[PLL14] = &pll14.clkr,
[PLL14_VOTE] = &pll14_vote,
+ [PLL18] = &pll18.clkr,
[GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
[GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
[GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
@@ -2307,6 +2825,18 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
[USB_FS1_SYSTEM_CLK] = &usb_fs1_sys_clk.clkr,
[EBI2_CLK] = &ebi2_clk.clkr,
[EBI2_AON_CLK] = &ebi2_aon_clk.clkr,
+ [GMAC_CORE1_CLK_SRC] = &gmac_core1_src.clkr,
+ [GMAC_CORE1_CLK] = &gmac_core1_clk.clkr,
+ [GMAC_CORE2_CLK_SRC] = &gmac_core2_src.clkr,
+ [GMAC_CORE2_CLK] = &gmac_core2_clk.clkr,
+ [GMAC_CORE3_CLK_SRC] = &gmac_core3_src.clkr,
+ [GMAC_CORE3_CLK] = &gmac_core3_clk.clkr,
+ [GMAC_CORE4_CLK_SRC] = &gmac_core4_src.clkr,
+ [GMAC_CORE4_CLK] = &gmac_core4_clk.clkr,
+ [UBI32_CORE1_CLK_SRC] = &ubi32_core1_src_clk.clkr,
+ [UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr,
+ [NSSTCM_CLK_SRC] = &nss_tcm_src.clkr,
+ [NSSTCM_CLK] = &nss_tcm_clk.clkr,
};
static const struct qcom_reset_map gcc_ipq806x_resets[] = {
@@ -2425,6 +2955,48 @@ static const struct qcom_reset_map gcc_ipq806x_resets[] = {
[USB30_1_PHY_RESET] = { 0x3b58, 0 },
[NSSFB0_RESET] = { 0x3b60, 6 },
[NSSFB1_RESET] = { 0x3b60, 7 },
+ [UBI32_CORE1_CLKRST_CLAMP_RESET] = { 0x3d3c, 3},
+ [UBI32_CORE1_CLAMP_RESET] = { 0x3d3c, 2 },
+ [UBI32_CORE1_AHB_RESET] = { 0x3d3c, 1 },
+ [UBI32_CORE1_AXI_RESET] = { 0x3d3c, 0 },
+ [UBI32_CORE2_CLKRST_CLAMP_RESET] = { 0x3d5c, 3 },
+ [UBI32_CORE2_CLAMP_RESET] = { 0x3d5c, 2 },
+ [UBI32_CORE2_AHB_RESET] = { 0x3d5c, 1 },
+ [UBI32_CORE2_AXI_RESET] = { 0x3d5c, 0 },
+ [GMAC_CORE1_RESET] = { 0x3cbc, 0 },
+ [GMAC_CORE2_RESET] = { 0x3cdc, 0 },
+ [GMAC_CORE3_RESET] = { 0x3cfc, 0 },
+ [GMAC_CORE4_RESET] = { 0x3d1c, 0 },
+ [GMAC_AHB_RESET] = { 0x3e24, 0 },
+ [NSS_CH0_RST_RX_CLK_N_RESET] = { 0x3b60, 0 },
+ [NSS_CH0_RST_TX_CLK_N_RESET] = { 0x3b60, 1 },
+ [NSS_CH0_RST_RX_125M_N_RESET] = { 0x3b60, 2 },
+ [NSS_CH0_HW_RST_RX_125M_N_RESET] = { 0x3b60, 3 },
+ [NSS_CH0_RST_TX_125M_N_RESET] = { 0x3b60, 4 },
+ [NSS_CH1_RST_RX_CLK_N_RESET] = { 0x3b60, 5 },
+ [NSS_CH1_RST_TX_CLK_N_RESET] = { 0x3b60, 6 },
+ [NSS_CH1_RST_RX_125M_N_RESET] = { 0x3b60, 7 },
+ [NSS_CH1_HW_RST_RX_125M_N_RESET] = { 0x3b60, 8 },
+ [NSS_CH1_RST_TX_125M_N_RESET] = { 0x3b60, 9 },
+ [NSS_CH2_RST_RX_CLK_N_RESET] = { 0x3b60, 10 },
+ [NSS_CH2_RST_TX_CLK_N_RESET] = { 0x3b60, 11 },
+ [NSS_CH2_RST_RX_125M_N_RESET] = { 0x3b60, 12 },
+ [NSS_CH2_HW_RST_RX_125M_N_RESET] = { 0x3b60, 13 },
+ [NSS_CH2_RST_TX_125M_N_RESET] = { 0x3b60, 14 },
+ [NSS_CH3_RST_RX_CLK_N_RESET] = { 0x3b60, 15 },
+ [NSS_CH3_RST_TX_CLK_N_RESET] = { 0x3b60, 16 },
+ [NSS_CH3_RST_RX_125M_N_RESET] = { 0x3b60, 17 },
+ [NSS_CH3_HW_RST_RX_125M_N_RESET] = { 0x3b60, 18 },
+ [NSS_CH3_RST_TX_125M_N_RESET] = { 0x3b60, 19 },
+ [NSS_RST_RX_250M_125M_N_RESET] = { 0x3b60, 20 },
+ [NSS_RST_TX_250M_125M_N_RESET] = { 0x3b60, 21 },
+ [NSS_QSGMII_TXPI_RST_N_RESET] = { 0x3b60, 22 },
+ [NSS_QSGMII_CDR_RST_N_RESET] = { 0x3b60, 23 },
+ [NSS_SGMII2_CDR_RST_N_RESET] = { 0x3b60, 24 },
+ [NSS_SGMII3_CDR_RST_N_RESET] = { 0x3b60, 25 },
+ [NSS_CAL_PRBS_RST_N_RESET] = { 0x3b60, 26 },
+ [NSS_LCKDT_RST_N_RESET] = { 0x3b60, 27 },
+ [NSS_SRDS_N_RESET] = { 0x3b60, 28 },
};
static const struct regmap_config gcc_ipq806x_regmap_config = {
@@ -2453,6 +3025,8 @@ static int gcc_ipq806x_probe(struct platform_device *pdev)
{
struct clk *clk;
struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int ret;
/* Temporary until RPM clocks are supported */
clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 25000000);
@@ -2463,7 +3037,25 @@ static int gcc_ipq806x_probe(struct platform_device *pdev)
if (IS_ERR(clk))
return PTR_ERR(clk);
- return qcom_cc_probe(pdev, &gcc_ipq806x_desc);
+ ret = qcom_cc_probe(pdev, &gcc_ipq806x_desc);
+ if (ret)
+ return ret;
+
+ regmap = dev_get_regmap(dev, NULL);
+ if (!regmap)
+ return -ENODEV;
+
+ /* Setup PLL18 static bits */
+ regmap_update_bits(regmap, 0x31a4, 0xffffffc0, 0x40000400);
+ regmap_write(regmap, 0x31b0, 0x3080);
+
+ /* Set GMAC footswitch sleep/wakeup values */
+ regmap_write(regmap, 0x3cb8, 8);
+ regmap_write(regmap, 0x3cd8, 8);
+ regmap_write(regmap, 0x3cf8, 8);
+ regmap_write(regmap, 0x3d18, 8);
+
+ return 0;
}
static int gcc_ipq806x_remove(struct platform_device *pdev)
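
A note on the probe change above: regmap_update_bits() performs a masked read-modify-write, so only the bits selected by the mask are rewritten (here, everything above the low 6 bits of the PLL18 register). A minimal user-space sketch of that semantic, with hypothetical reg_read()/reg_write() helpers standing in for the MMIO accessors:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical backing store standing in for the register file. */
static uint32_t regs[0x4000];

static uint32_t reg_read(uint32_t addr) { return regs[addr]; }
static void reg_write(uint32_t addr, uint32_t val) { regs[addr] = val; }

/* Same semantics as regmap_update_bits(): only bits in 'mask' change. */
static void update_bits(uint32_t addr, uint32_t mask, uint32_t val)
{
	uint32_t old = reg_read(addr);

	reg_write(addr, (old & ~mask) | (val & mask));
}

int main(void)
{
	/* Mirrors the PLL18 setup: keep the low 6 bits, rewrite the rest. */
	update_bits(0x31a4, 0xffffffc0, 0x40000400);
	printf("0x31a4 = 0x%08x\n", (unsigned int)reg_read(0x31a4));
	return 0;
}
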
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 66bd6a2ad83b..d95a0c300b03 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -445,10 +445,10 @@ static int c4iw_get_mib(struct ib_device *ibdev,
cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
memset(stats, 0, sizeof *stats);
- stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
- stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
- stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
- stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutSegs;
+ stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs;
+ stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs;
+ stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs;
+ stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts;
return 0;
}
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 0f00204d2ece..21cb41a60fe8 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -189,7 +189,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
{
int i;
u64 guid_indexes;
- int slave_id;
+ int slave_id, slave_port;
enum slave_port_state new_state;
enum slave_port_state prev_state;
__be64 tmp_cur_ag, form_cache_ag;
@@ -217,6 +217,11 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
if (slave_id >= dev->dev->persist->num_vfs + 1)
return;
+
+ slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num);
+ if (slave_port < 0) /* this port isn't available for the VF */
+ continue;
+
tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
form_cache_ag = get_cached_alias_guid(dev, port_num,
(NUM_ALIAS_GUID_IN_REC * block_num) + i);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 9cd2b002d7ae..ad6a8818608d 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1365,14 +1365,17 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
* standard address handle by decoding the tunnelled mlx4_ah fields */
memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
ah.ibah.device = ctx->ib_dev;
+
+ port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
+ port = mlx4_slave_convert_port(dev->dev, slave, port);
+ if (port < 0)
+ return;
+ ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+
mlx4_ib_query_ah(&ah.ibah, &ah_attr);
if (ah_attr.ah_flags & IB_AH_GRH)
fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
- port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num);
- if (port < 0)
- return;
- ah_attr.port_num = port;
memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
/* if the slave has a default vlan, use it */
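
A note on the reordering above: the tunnelled address vector stores the physical port in the top byte of the big-endian port_pd word, so the slave-to-physical port conversion has to be patched into that word before mlx4_ib_query_ah() decodes it. A standalone sketch of the pack/unpack, assuming a little-endian host and a dummy conversion in place of mlx4_slave_convert_port():

#include <stdint.h>
#include <stdio.h>

/* be32_to_cpu()/cpu_to_be32() modelled as an explicit byte swap so the
 * sketch runs on a little-endian host. */
static uint32_t be32_to_cpu_(uint32_t v)
{
	return ((v >> 24) & 0xff) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}
#define cpu_to_be32_ be32_to_cpu_	/* the byte swap is its own inverse */

int main(void)
{
	/* port 1 in the top byte, a made-up PD in the low 24 bits */
	uint32_t port_pd = cpu_to_be32_(1 << 24 | 0x00abcd);
	int port = be32_to_cpu_(port_pd) >> 24;
	int converted = port + 1;	/* stand-in for mlx4_slave_convert_port() */

	/* Re-insert the converted port, keeping the low 24 bits, as mad.c does. */
	port_pd = cpu_to_be32_(converted << 24 |
			       (be32_to_cpu_(port_pd) & 0xffffff));
	printf("port %d -> %d, pd 0x%06x\n", port, converted,
	       be32_to_cpu_(port_pd) & 0xffffff);
	return 0;
}
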
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index cc64400d41ac..024b0f745035 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1090,7 +1090,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_NATIVE);
+ MLX4_CMD_WRAPPED);
if (ret == -ENOMEM)
pr_err("mcg table is full. Fail to register network rule.\n");
else if (ret == -ENXIO)
@@ -1107,7 +1107,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
int err;
err = mlx4_cmd(dev, reg_id, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
- MLX4_CMD_NATIVE);
+ MLX4_CMD_WRAPPED);
if (err)
pr_err("Fail to detach network rule. registration id = 0x%llx\n",
reg_id);
@@ -2041,77 +2041,52 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
- char name[80];
- int eq_per_port = 0;
- int added_eqs = 0;
- int total_eqs = 0;
- int i, j, eq;
-
- /* Legacy mode or comp_pool is not large enough */
- if (dev->caps.comp_pool == 0 ||
- dev->caps.num_ports > dev->caps.comp_pool)
- return;
-
- eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
-
- /* Init eq table */
- added_eqs = 0;
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
- added_eqs += eq_per_port;
-
- total_eqs = dev->caps.num_comp_vectors + added_eqs;
+ int i, j, eq = 0, total_eqs = 0;
- ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+ ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
+ sizeof(ibdev->eq_table[0]), GFP_KERNEL);
if (!ibdev->eq_table)
return;
- ibdev->eq_added = added_eqs;
-
- eq = 0;
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
- for (j = 0; j < eq_per_port; j++) {
- snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
- i, j, dev->persist->pdev->bus->name);
- /* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(dev, name, NULL,
- &ibdev->eq_table[eq])) {
- /* Use legacy (same as mlx4_en driver) */
- pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
- ibdev->eq_table[eq] =
- (eq % dev->caps.num_comp_vectors);
- }
- eq++;
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
+ j++, total_eqs++) {
+ if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
+ continue;
+ ibdev->eq_table[eq] = total_eqs;
+ if (!mlx4_assign_eq(dev, i,
+ &ibdev->eq_table[eq]))
+ eq++;
+ else
+ ibdev->eq_table[eq] = -1;
}
}
- /* Fill the reset of the vector with legacy EQ */
- for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
- ibdev->eq_table[eq++] = i;
+ for (i = eq; i < dev->caps.num_comp_vectors;
+ ibdev->eq_table[i++] = -1)
+ ;
/* Advertise the new number of EQs to clients */
- ibdev->ib_dev.num_comp_vectors = total_eqs;
+ ibdev->ib_dev.num_comp_vectors = eq;
}
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
int i;
+ int total_eqs = ibdev->ib_dev.num_comp_vectors;
- /* no additional eqs were added */
+ /* no eqs were allocated */
if (!ibdev->eq_table)
return;
/* Reset the advertised EQ number */
- ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+ ibdev->ib_dev.num_comp_vectors = 0;
- /* Free only the added eqs */
- for (i = 0; i < ibdev->eq_added; i++) {
- /* Don't free legacy eqs if used */
- if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
- continue;
+ for (i = 0; i < total_eqs; i++)
mlx4_release_eq(dev, ibdev->eq_table[i]);
- }
kfree(ibdev->eq_table);
+ ibdev->eq_table = NULL;
}
static void *mlx4_ib_add(struct mlx4_dev *dev)
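
The EQ rework above drops the per-port naming scheme and instead records whichever vectors mlx4_assign_eq() grants, marks the unused tail of the table with -1, and advertises only the count that succeeded. A compact model of that bookkeeping; assign_eq() here is a stand-in that arbitrarily fails on odd vectors:

#include <stdio.h>

#define NUM_VECTORS 8

/* Stand-in for mlx4_assign_eq(): pretend only even vectors can be assigned. */
static int assign_eq(int vector)
{
	return (vector % 2) ? -1 : 0;
}

int main(void)
{
	int eq_table[NUM_VECTORS];
	int eq = 0, vector, i;

	/* Keep the vectors that could be assigned, compacted at the front. */
	for (vector = 0; vector < NUM_VECTORS; vector++) {
		eq_table[eq] = vector;
		if (!assign_eq(vector))
			eq++;
		else
			eq_table[eq] = -1;
	}

	/* Mark the unused tail, as mlx4_ib_alloc_eqs() now does. */
	for (i = eq; i < NUM_VECTORS; i++)
		eq_table[i] = -1;

	printf("advertised %d comp vectors\n", eq);	/* num_comp_vectors */
	return 0;
}
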
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index fce3934372a1..ef80e6c99a68 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -523,7 +523,6 @@ struct mlx4_ib_dev {
struct mlx4_ib_iboe iboe;
int counters[MLX4_MAX_PORTS];
int *eq_table;
- int eq_added;
struct kobject *iov_parent;
struct kobject *ports_parent;
struct kobject *dev_ports_parent[MLX4_MFUNC_MAX];
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 10df386c6344..bce263b92821 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,8 +1,6 @@
config MLX5_INFINIBAND
tristate "Mellanox Connect-IB HCA support"
- depends on NETDEVICES && ETHERNET && PCI
- select NET_VENDOR_MELLANOX
- select MLX5_CORE
+ depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
---help---
This driver provides low-level InfiniBand support for
Mellanox Connect-IB PCI Express host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 2ee6b1051975..e2bea9ab93b3 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -590,8 +590,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
{
int err;
- err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
- PAGE_SIZE * 2, &buf->buf);
+ err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
if (err)
return err;
@@ -754,7 +753,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
return ERR_PTR(-EINVAL);
entries = roundup_pow_of_two(entries + 1);
- if (entries > dev->mdev->caps.gen.max_cqes)
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
return ERR_PTR(-EINVAL);
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -921,7 +920,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
int err;
u32 fsel;
- if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+ if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
return -ENOSYS;
in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -1076,7 +1075,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
int uninitialized_var(cqe_size);
unsigned long flags;
- if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+ if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
pr_info("Firmware does not support resize CQ\n");
return -ENOSYS;
}
@@ -1085,7 +1084,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
return -EINVAL;
entries = roundup_pow_of_two(entries + 1);
- if (entries > dev->mdev->caps.gen.max_cqes + 1)
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
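
The recurring pattern in these hunks: the new firmware capability interface reports limits as log2 values (log_max_cq_sz and friends), which the driver expands with 1 << log before comparing. A small sketch of the size check in mlx5_ib_create_cq(), with a made-up capability value:

#include <stdio.h>

#define LOG_MAX_CQ_SZ 16	/* stand-in for MLX5_CAP_GEN(mdev, log_max_cq_sz) */

static unsigned int roundup_pow_of_two_(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int entries = 40000;

	entries = roundup_pow_of_two_(entries + 1);	/* as mlx5_ib_create_cq() */
	if (entries > (1u << LOG_MAX_CQ_SZ))
		printf("-EINVAL: %u > %u\n", entries, 1u << LOG_MAX_CQ_SZ);
	else
		printf("ok: %u CQEs\n", entries);
	return 0;
}
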
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 9cf9a37bb5ff..a770490ebbf1 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
packet_error = be16_to_cpu(out_mad->status);
- dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ?
+ dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
out:
@@ -137,3 +137,300 @@ out:
kfree(out_mad);
return err;
}
+
+int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+ struct ib_smp *out_mad)
+{
+ struct ib_smp *in_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ if (!in_mad)
+ return -ENOMEM;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
+ out_mad);
+
+ kfree(in_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
+ __be64 *sys_image_guid)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(sys_image_guid, out_mad->data + 4, 8);
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
+ u16 *max_pkeys)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ *max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
+ u32 *vendor_id)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ *vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
+
+ err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(node_desc, out_mad->data, 64);
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+ err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(node_guid, out_mad->data + 12, 8);
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
+ in_mad->attr_mod = cpu_to_be32(index / 32);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ memcpy(gid->raw, out_mad->data + 8, 8);
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
+ in_mad->attr_mod = cpu_to_be32(index / 8);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int ext_active_speed;
+ int err = -ENOMEM;
+
+ if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
+ mlx5_ib_warn(dev, "invalid port number %d\n", port);
+ return -EINVAL;
+ }
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ memset(props, 0, sizeof(*props));
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
+ if (err) {
+ mlx5_ib_warn(dev, "err %d\n", err);
+ goto out;
+ }
+
+ props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
+ props->lmc = out_mad->data[34] & 0x7;
+ props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
+ props->sm_sl = out_mad->data[36] & 0xf;
+ props->state = out_mad->data[32] & 0xf;
+ props->phys_state = out_mad->data[33] >> 4;
+ props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
+ props->gid_tbl_len = out_mad->data[50];
+ props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+ props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
+ props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
+ props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
+ props->active_width = out_mad->data[31] & 0xf;
+ props->active_speed = out_mad->data[35] >> 4;
+ props->max_mtu = out_mad->data[41] & 0xf;
+ props->active_mtu = out_mad->data[36] >> 4;
+ props->subnet_timeout = out_mad->data[51] & 0x1f;
+ props->max_vl_num = out_mad->data[37] >> 4;
+ props->init_type_reply = out_mad->data[41] >> 4;
+
+ /* Check if extended speeds (EDR/FDR/...) are supported */
+ if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
+ ext_active_speed = out_mad->data[62] >> 4;
+
+ switch (ext_active_speed) {
+ case 1:
+ props->active_speed = 16; /* FDR */
+ break;
+ case 2:
+ props->active_speed = 32; /* EDR */
+ break;
+ }
+ }
+
+ /* If the reported active speed is QDR, check if it is FDR-10 */
+ if (props->active_speed == 4) {
+ if (mdev->port_caps[port - 1].ext_port_cap &
+ MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
+ init_query_mad(in_mad);
+ in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(dev, 1, 1, port,
+ NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ /* Checking LinkSpeedActive for FDR-10 */
+ if (out_mad->data[15] & 0x1)
+ props->active_speed = 8;
+ }
+ }
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+
+ return err;
+}
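
The helpers added above all follow one shape: allocate an in/out MAD pair, set an attribute ID, issue mlx5_MAD_IFC(), then pull big-endian fields out of the 64-byte data block at fixed offsets (for example the LID at offset 16 and the LMC in the low bits of byte 34). A portable sketch of that extraction step, using plain byte arithmetic instead of be16_to_cpup():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Endian-independent big-endian 16-bit read at a byte offset. */
static uint16_t be16_at(const uint8_t *data, int off)
{
	return (uint16_t)(data[off] << 8 | data[off + 1]);
}

int main(void)
{
	uint8_t data[64];

	memset(data, 0, sizeof(data));
	data[16] = 0x00;	/* LID 42, big-endian at offset 16 */
	data[17] = 0x2a;
	data[34] = 0x03;	/* low 3 bits carry the LMC */

	printf("lid %u lmc %u\n", be16_at(data, 16), data[34] & 0x7);
	return 0;
}
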
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 57c9809e8b87..79dadd627e9c 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -40,6 +40,7 @@
#include <linux/io-mapping.h>
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
+#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include "user.h"
@@ -62,32 +63,168 @@ static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
+static enum rdma_link_layer
+mlx5_ib_port_link_layer(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ switch (MLX5_CAP_GEN(dev->mdev, port_type)) {
+ case MLX5_CAP_PORT_TYPE_IB:
+ return IB_LINK_LAYER_INFINIBAND;
+ case MLX5_CAP_PORT_TYPE_ETH:
+ return IB_LINK_LAYER_ETHERNET;
+ default:
+ return IB_LINK_LAYER_UNSPECIFIED;
+ }
+}
+
+static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
+{
+ return !dev->mdev->issi;
+}
+
+enum {
+ MLX5_VPORT_ACCESS_METHOD_MAD,
+ MLX5_VPORT_ACCESS_METHOD_HCA,
+ MLX5_VPORT_ACCESS_METHOD_NIC,
+};
+
+static int mlx5_get_vport_access_method(struct ib_device *ibdev)
+{
+ if (mlx5_use_mad_ifc(to_mdev(ibdev)))
+ return MLX5_VPORT_ACCESS_METHOD_MAD;
+
+ if (mlx5_ib_port_link_layer(ibdev) ==
+ IB_LINK_LAYER_ETHERNET)
+ return MLX5_VPORT_ACCESS_METHOD_NIC;
+
+ return MLX5_VPORT_ACCESS_METHOD_HCA;
+}
+
+static int mlx5_query_system_image_guid(struct ib_device *ibdev,
+ __be64 *sys_image_guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ u64 tmp;
+ int err;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_system_image_guid(ibdev,
+ sys_image_guid);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+ if (!err)
+ *sys_image_guid = cpu_to_be64(tmp);
+ return err;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_max_pkeys(struct ib_device *ibdev,
+ u16 *max_pkeys)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
+ pkey_table_size));
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_vendor_id(struct ib_device *ibdev,
+ u32 *vendor_id)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
+ __be64 *node_guid)
+{
+ u64 tmp;
+ int err;
+
+ switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_node_guid(dev, node_guid);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
+ if (!err)
+ *node_guid = cpu_to_be64(tmp);
+ return err;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+struct mlx5_reg_node_desc {
+ u8 desc[64];
+};
+
+static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+ struct mlx5_reg_node_desc in;
+
+ if (mlx5_use_mad_ifc(dev))
+ return mlx5_query_mad_ifc_node_desc(dev, node_desc);
+
+ memset(&in, 0, sizeof(in));
+
+ return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
+ sizeof(struct mlx5_reg_node_desc),
+ MLX5_REG_NODE_DESC, 0, 0);
+}
+
static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- struct mlx5_general_caps *gen;
+ struct mlx5_core_dev *mdev = dev->mdev;
int err = -ENOMEM;
int max_rq_sg;
int max_sq_sg;
- u64 flags;
- gen = &dev->mdev->caps.gen;
- in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
- out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
- if (!in_mad || !out_mad)
- goto out;
-
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+ memset(props, 0, sizeof(*props));
+ err = mlx5_query_system_image_guid(ibdev,
+ &props->sys_image_guid);
+ if (err)
+ return err;
- err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
if (err)
- goto out;
+ return err;
- memset(props, 0, sizeof(*props));
+ err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
+ if (err)
+ return err;
props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
(fw_rev_min(dev->mdev) << 16) |
@@ -96,18 +233,18 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_RC_RNR_NAK_GEN;
- flags = gen->flags;
- if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
+
+ if (MLX5_CAP_GEN(mdev, pkv))
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
- if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
+ if (MLX5_CAP_GEN(mdev, qkv))
props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
- if (flags & MLX5_DEV_CAP_FLAG_APM)
+ if (MLX5_CAP_GEN(mdev, apm))
props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
- if (flags & MLX5_DEV_CAP_FLAG_XRC)
+ if (MLX5_CAP_GEN(mdev, xrc))
props->device_cap_flags |= IB_DEVICE_XRC;
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
- if (flags & MLX5_DEV_CAP_FLAG_SIG_HAND_OVER) {
+ if (MLX5_CAP_GEN(mdev, sho)) {
props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
/* At this stage no support for signature handover */
props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
@@ -116,221 +253,270 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
IB_GUARD_T10DIF_CSUM;
}
- if (flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)
+ if (MLX5_CAP_GEN(mdev, block_lb_mc))
props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
- props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
- 0xffffff;
- props->vendor_part_id = be16_to_cpup((__be16 *)(out_mad->data + 30));
- props->hw_ver = be32_to_cpup((__be32 *)(out_mad->data + 32));
- memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
+ props->vendor_part_id = mdev->pdev->device;
+ props->hw_ver = mdev->pdev->revision;
props->max_mr_size = ~0ull;
- props->page_size_cap = gen->min_page_sz;
- props->max_qp = 1 << gen->log_max_qp;
- props->max_qp_wr = gen->max_wqes;
- max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
- max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
- sizeof(struct mlx5_wqe_data_seg);
+ props->page_size_cap = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+ props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
+ props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
+ sizeof(struct mlx5_wqe_data_seg);
+ max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
+ sizeof(struct mlx5_wqe_ctrl_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
- props->max_cq = 1 << gen->log_max_cq;
- props->max_cqe = gen->max_cqes - 1;
- props->max_mr = 1 << gen->log_max_mkey;
- props->max_pd = 1 << gen->log_max_pd;
- props->max_qp_rd_atom = 1 << gen->log_max_ra_req_qp;
- props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp;
- props->max_srq = 1 << gen->log_max_srq;
- props->max_srq_wr = gen->max_srq_wqes - 1;
- props->local_ca_ack_delay = gen->local_ca_ack_delay;
+ props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+ props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
+ props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+ props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
+ props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
+ props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
+ props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
+ props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
+ props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
props->max_srq_sge = max_rq_sg - 1;
props->max_fast_reg_page_list_len = (unsigned int)-1;
- props->local_ca_ack_delay = gen->local_ca_ack_delay;
props->atomic_cap = IB_ATOMIC_NONE;
props->masked_atomic_cap = IB_ATOMIC_NONE;
- props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
- props->max_mcast_grp = 1 << gen->log_max_mcg;
- props->max_mcast_qp_attach = gen->max_qp_mcg;
+ props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
+ props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+ if (MLX5_CAP_GEN(mdev, pg))
props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
props->odp_caps = dev->odp_caps;
#endif
-out:
- kfree(in_mad);
- kfree(out_mad);
-
- return err;
+ return 0;
}
-int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
- struct ib_port_attr *props)
+enum mlx5_ib_width {
+ MLX5_IB_WIDTH_1X = 1 << 0,
+ MLX5_IB_WIDTH_2X = 1 << 1,
+ MLX5_IB_WIDTH_4X = 1 << 2,
+ MLX5_IB_WIDTH_8X = 1 << 3,
+ MLX5_IB_WIDTH_12X = 1 << 4
+};
+
+static int translate_active_width(struct ib_device *ibdev, u8 active_width,
+ u8 *ib_width)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- struct mlx5_general_caps *gen;
- int ext_active_speed;
- int err = -ENOMEM;
-
- gen = &dev->mdev->caps.gen;
- if (port < 1 || port > gen->num_ports) {
- mlx5_ib_warn(dev, "invalid port number %d\n", port);
- return -EINVAL;
+ int err = 0;
+
+ if (active_width & MLX5_IB_WIDTH_1X) {
+ *ib_width = IB_WIDTH_1X;
+ } else if (active_width & MLX5_IB_WIDTH_2X) {
+ mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
+ (int)active_width);
+ err = -EINVAL;
+ } else if (active_width & MLX5_IB_WIDTH_4X) {
+ *ib_width = IB_WIDTH_4X;
+ } else if (active_width & MLX5_IB_WIDTH_8X) {
+ *ib_width = IB_WIDTH_8X;
+ } else if (active_width & MLX5_IB_WIDTH_12X) {
+ *ib_width = IB_WIDTH_12X;
+ } else {
+ mlx5_ib_dbg(dev, "Invalid active_width %d\n",
+ (int)active_width);
+ err = -EINVAL;
}
- in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
- out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
- if (!in_mad || !out_mad)
- goto out;
-
- memset(props, 0, sizeof(*props));
-
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
- in_mad->attr_mod = cpu_to_be32(port);
+ return err;
+}
- err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
- if (err) {
- mlx5_ib_warn(dev, "err %d\n", err);
- goto out;
+static int mlx5_mtu_to_ib_mtu(int mtu)
+{
+ switch (mtu) {
+ case 256: return 1;
+ case 512: return 2;
+ case 1024: return 3;
+ case 2048: return 4;
+ case 4096: return 5;
+ default:
+ pr_warn("invalid mtu\n");
+ return -1;
}
+}
+enum ib_max_vl_num {
+ __IB_MAX_VL_0 = 1,
+ __IB_MAX_VL_0_1 = 2,
+ __IB_MAX_VL_0_3 = 3,
+ __IB_MAX_VL_0_7 = 4,
+ __IB_MAX_VL_0_14 = 5,
+};
- props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
- props->lmc = out_mad->data[34] & 0x7;
- props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
- props->sm_sl = out_mad->data[36] & 0xf;
- props->state = out_mad->data[32] & 0xf;
- props->phys_state = out_mad->data[33] >> 4;
- props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
- props->gid_tbl_len = out_mad->data[50];
- props->max_msg_sz = 1 << gen->log_max_msg;
- props->pkey_tbl_len = gen->port[port - 1].pkey_table_len;
- props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
- props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
- props->active_width = out_mad->data[31] & 0xf;
- props->active_speed = out_mad->data[35] >> 4;
- props->max_mtu = out_mad->data[41] & 0xf;
- props->active_mtu = out_mad->data[36] >> 4;
- props->subnet_timeout = out_mad->data[51] & 0x1f;
- props->max_vl_num = out_mad->data[37] >> 4;
- props->init_type_reply = out_mad->data[41] >> 4;
-
- /* Check if extended speeds (EDR/FDR/...) are supported */
- if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
- ext_active_speed = out_mad->data[62] >> 4;
-
- switch (ext_active_speed) {
- case 1:
- props->active_speed = 16; /* FDR */
- break;
- case 2:
- props->active_speed = 32; /* EDR */
- break;
- }
- }
+enum mlx5_vl_hw_cap {
+ MLX5_VL_HW_0 = 1,
+ MLX5_VL_HW_0_1 = 2,
+ MLX5_VL_HW_0_2 = 3,
+ MLX5_VL_HW_0_3 = 4,
+ MLX5_VL_HW_0_4 = 5,
+ MLX5_VL_HW_0_5 = 6,
+ MLX5_VL_HW_0_6 = 7,
+ MLX5_VL_HW_0_7 = 8,
+ MLX5_VL_HW_0_14 = 15
+};
- /* If reported active speed is QDR, check if is FDR-10 */
- if (props->active_speed == 4) {
- if (gen->ext_port_cap[port - 1] &
- MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
- init_query_mad(in_mad);
- in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
- in_mad->attr_mod = cpu_to_be32(port);
-
- err = mlx5_MAD_IFC(dev, 1, 1, port,
- NULL, NULL, in_mad, out_mad);
- if (err)
- goto out;
-
- /* Checking LinkSpeedActive for FDR-10 */
- if (out_mad->data[15] & 0x1)
- props->active_speed = 8;
- }
- }
+static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
+ u8 *max_vl_num)
+{
+ switch (vl_hw_cap) {
+ case MLX5_VL_HW_0:
+ *max_vl_num = __IB_MAX_VL_0;
+ break;
+ case MLX5_VL_HW_0_1:
+ *max_vl_num = __IB_MAX_VL_0_1;
+ break;
+ case MLX5_VL_HW_0_3:
+ *max_vl_num = __IB_MAX_VL_0_3;
+ break;
+ case MLX5_VL_HW_0_7:
+ *max_vl_num = __IB_MAX_VL_0_7;
+ break;
+ case MLX5_VL_HW_0_14:
+ *max_vl_num = __IB_MAX_VL_0_14;
+ break;
-out:
- kfree(in_mad);
- kfree(out_mad);
+ default:
+ return -EINVAL;
+ }
- return err;
+ return 0;
}
-static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
- union ib_gid *gid)
+static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *rep;
+ int max_mtu;
+ int oper_mtu;
+ int err;
+ u8 ib_link_width_oper;
+ u8 vl_hw_cap;
- in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
- out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
- if (!in_mad || !out_mad)
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep) {
+ err = -ENOMEM;
goto out;
+ }
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
- in_mad->attr_mod = cpu_to_be32(port);
+ memset(props, 0, sizeof(*props));
- err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+ err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
if (err)
goto out;
- memcpy(gid->raw, out_mad->data + 8, 8);
-
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
- in_mad->attr_mod = cpu_to_be32(index / 8);
+ props->lid = rep->lid;
+ props->lmc = rep->lmc;
+ props->sm_lid = rep->sm_lid;
+ props->sm_sl = rep->sm_sl;
+ props->state = rep->vport_state;
+ props->phys_state = rep->port_physical_state;
+ props->port_cap_flags = rep->cap_mask1;
+ props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
+ props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+ props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
+ props->bad_pkey_cntr = rep->pkey_violation_counter;
+ props->qkey_viol_cntr = rep->qkey_violation_counter;
+ props->subnet_timeout = rep->subnet_timeout;
+ props->init_type_reply = rep->init_type_reply;
+
+ err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
+ if (err)
+ goto out;
- err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+ err = translate_active_width(ibdev, ib_link_width_oper,
+ &props->active_width);
+ if (err)
+ goto out;
+ err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
+ port);
if (err)
goto out;
- memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
+ mlx5_query_port_max_mtu(mdev, &max_mtu, port);
+
+ props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
+
+ mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
+
+ props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
+ err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
+ if (err)
+ goto out;
+
+ err = translate_max_vl_num(ibdev, vl_hw_cap,
+ &props->max_vl_num);
out:
- kfree(in_mad);
- kfree(out_mad);
+ kfree(rep);
return err;
}
-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
+int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_port(ibdev, port, props);
- in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
- out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
- if (!in_mad || !out_mad)
- goto out;
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ return mlx5_query_hca_port(ibdev, port, props);
+
+ default:
+ return -EINVAL;
+ }
+}
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
- in_mad->attr_mod = cpu_to_be32(index / 32);
+static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
- err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
- if (err)
- goto out;
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
- *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
+
+ default:
+ return -EINVAL;
+ }
-out:
- kfree(in_mad);
- kfree(out_mad);
- return err;
}
-struct mlx5_reg_node_desc {
- u8 desc[64];
-};
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
+ pkey);
+ default:
+ return -EINVAL;
+ }
+}
static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
struct ib_device_modify *props)
@@ -392,7 +578,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_ib_alloc_ucontext_req_v2 req;
struct mlx5_ib_alloc_ucontext_resp resp;
struct mlx5_ib_ucontext *context;
- struct mlx5_general_caps *gen;
struct mlx5_uuar_info *uuari;
struct mlx5_uar *uars;
int gross_uuars;
@@ -403,7 +588,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
int i;
size_t reqlen;
- gen = &dev->mdev->caps.gen;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
@@ -436,14 +620,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
- resp.qp_tab_size = 1 << gen->log_max_qp;
- resp.bf_reg_size = gen->bf_reg_size;
- resp.cache_line_size = L1_CACHE_BYTES;
- resp.max_sq_desc_sz = gen->max_sq_desc_sz;
- resp.max_rq_desc_sz = gen->max_rq_desc_sz;
- resp.max_send_wqebb = gen->max_wqes;
- resp.max_recv_wr = gen->max_wqes;
- resp.max_srq_recv_wr = gen->max_srq_wqes;
+ resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
+ resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+ resp.cache_line_size = L1_CACHE_BYTES;
+ resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
+ resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
+ resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+ resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+ resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
@@ -493,7 +677,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
mutex_init(&context->db_page_mutex);
resp.tot_uuars = req.total_num_uuars;
- resp.num_ports = gen->num_ports;
+ resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
err = ib_copy_to_udata(udata, &resp,
sizeof(resp) - sizeof(resp.reserved));
if (err)
@@ -731,37 +915,15 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
static int init_node_data(struct mlx5_ib_dev *dev)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
-
- in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
- out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
- if (!in_mad || !out_mad)
- goto out;
-
- init_query_mad(in_mad);
- in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
-
- err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
- if (err)
- goto out;
-
- memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
-
- in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+ int err;
- err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
if (err)
- goto out;
+ return err;
- dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
- memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
+ dev->mdev->rev_id = dev->mdev->pdev->revision;
-out:
- kfree(in_mad);
- kfree(out_mad);
- return err;
+ return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}
static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
@@ -895,11 +1057,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
- struct mlx5_general_caps *gen;
int port;
- gen = &dev->mdev->caps.gen;
- for (port = 1; port <= gen->num_ports; port++)
+ for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
mlx5_query_ext_port_caps(dev, port);
}
@@ -907,11 +1067,9 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
{
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
- struct mlx5_general_caps *gen;
int err = -ENOMEM;
int port;
- gen = &dev->mdev->caps.gen;
pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
if (!pprops)
goto out;
@@ -926,14 +1084,17 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
goto out;
}
- for (port = 1; port <= gen->num_ports; port++) {
+ for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
if (err) {
- mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
+ mlx5_ib_warn(dev, "query_port %d failed %d\n",
+ port, err);
break;
}
- gen->port[port - 1].pkey_table_len = dprops->max_pkeys;
- gen->port[port - 1].gid_table_len = pprops->gid_tbl_len;
+ dev->mdev->port_caps[port - 1].pkey_table_len =
+ dprops->max_pkeys;
+ dev->mdev->port_caps[port - 1].gid_table_len =
+ pprops->gid_tbl_len;
mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
dprops->max_pkeys, pprops->gid_tbl_len);
}
@@ -1159,8 +1320,29 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s0->usecnt, 0);
+ memset(&attr, 0, sizeof(attr));
+ attr.attr.max_sge = 1;
+ attr.attr.max_wr = 1;
+ attr.srq_type = IB_SRQT_BASIC;
+ devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
+ if (IS_ERR(devr->s1)) {
+ ret = PTR_ERR(devr->s1);
+ goto error5;
+ }
+ devr->s1->device = &dev->ib_dev;
+ devr->s1->pd = devr->p0;
+ devr->s1->uobject = NULL;
+ devr->s1->event_handler = NULL;
+ devr->s1->srq_context = NULL;
+ devr->s1->srq_type = IB_SRQT_BASIC;
+ devr->s1->ext.xrc.cq = devr->c0;
+ atomic_inc(&devr->p0->usecnt);
+ atomic_set(&devr->s0->usecnt, 0);
+
return 0;
+error5:
+ mlx5_ib_destroy_srq(devr->s0);
error4:
mlx5_ib_dealloc_xrcd(devr->x1);
error3:
@@ -1175,6 +1357,7 @@ error0:
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
+ mlx5_ib_destroy_srq(devr->s1);
mlx5_ib_destroy_srq(devr->s0);
mlx5_ib_dealloc_xrcd(devr->x0);
mlx5_ib_dealloc_xrcd(devr->x1);
@@ -1188,6 +1371,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
int err;
int i;
+ /* don't create IB instance over Eth ports, no RoCE yet! */
+ if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
+ return NULL;
+
printk_once(KERN_INFO "%s", mlx5_version);
dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
@@ -1200,15 +1387,16 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (err)
goto err_dealloc;
- get_ext_port_caps(dev);
+ if (mlx5_use_mad_ifc(dev))
+ get_ext_port_caps(dev);
MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
- dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey;
- dev->num_ports = mdev->caps.gen.num_ports;
+ dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
+ dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors =
dev->mdev->priv.eq_table.num_comp_vectors;
@@ -1286,9 +1474,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
- mlx5_ib_internal_query_odp_caps(dev);
+ mlx5_ib_internal_fill_odp_caps(dev);
- if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
+ if (MLX5_CAP_GEN(mdev, xrc)) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
dev->ib_dev.uverbs_cmd_mask |=
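
The structural change in main.c above: every verbs query is now routed through one of three vport access methods, chosen once from the firmware ISSI level and the port link layer. A minimal model of that dispatch; the enum values are local stand-ins for the driver's MLX5_VPORT_ACCESS_METHOD_* constants:

#include <stdio.h>

enum access_method { METHOD_MAD, METHOD_HCA, METHOD_NIC };

static enum access_method get_access_method(int issi, int link_is_eth)
{
	if (!issi)		/* pre-ISSI firmware: use MAD_IFC */
		return METHOD_MAD;
	if (link_is_eth)	/* Ethernet link layer: NIC vport commands */
		return METHOD_NIC;
	return METHOD_HCA;	/* IB link layer: HCA vport commands */
}

int main(void)
{
	printf("%d %d %d\n",
	       get_access_method(0, 0),	/* MAD */
	       get_access_method(1, 1),	/* NIC */
	       get_access_method(1, 0));	/* HCA */
	return 0;
}
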
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index dff1cfcdf476..873dc354766a 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -415,6 +415,7 @@ struct mlx5_ib_resources {
struct ib_xrcd *x1;
struct ib_pd *p0;
struct ib_srq *s0;
+ struct ib_srq *s1;
};
struct mlx5_ib_dev {
@@ -594,6 +595,22 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
+int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+ struct ib_smp *out_mad);
+int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
+ __be64 *sys_image_guid);
+int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
+ u16 *max_pkeys);
+int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
+ u32 *vendor_id);
+int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
+int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey);
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid);
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
@@ -617,7 +634,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;
-int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev);
+void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
struct mlx5_ib_pfault *pfault);
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
@@ -631,9 +648,9 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
unsigned long end);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
- return 0;
+ return;
}
static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 71c593583864..bc9a0de897cb 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -975,8 +975,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
struct mlx5_ib_mr *mr;
int inlen;
int err;
- bool pg_cap = !!(dev->mdev->caps.gen.flags &
- MLX5_DEV_CAP_FLAG_ON_DMND_PG);
+ bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 5099db08afd2..aa8391e75385 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -109,40 +109,33 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
ib_umem_odp_unmap_dma_pages(umem, start, end);
}
-#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do { \
- if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name) \
- ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name; \
-} while (0)
-
-int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
- int err;
- struct mlx5_odp_caps hw_caps;
struct ib_odp_caps *caps = &dev->odp_caps;
memset(caps, 0, sizeof(*caps));
- if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
- return 0;
-
- err = mlx5_query_odp_caps(dev->mdev, &hw_caps);
- if (err)
- goto out;
+ if (!MLX5_CAP_GEN(dev->mdev, pg))
+ return;
caps->general_caps = IB_ODP_SUPPORT;
- COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.ud_odp_caps,
- SEND);
- COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
- SEND);
- COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
- RECV);
- COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
- WRITE);
- COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
- READ);
-
-out:
- return err;
+
+ if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
+ caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+
+ return;
}
static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
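
The ODP rewrite above replaces the COPY_ODP_BIT_MLX_TO_IB() macro with direct per-bit tests that translate firmware capability bits into IB support flags. The same translation in miniature; the bit values here are made up, not the real constants:

#include <stdio.h>

#define HW_SEND (1 << 0)
#define HW_RECV (1 << 1)
#define IB_SUPPORT_SEND (1 << 0)
#define IB_SUPPORT_RECV (1 << 1)

int main(void)
{
	unsigned int hw_caps = HW_SEND | HW_RECV;	/* pretend fw reports both */
	unsigned int rc_odp_caps = 0;

	if (hw_caps & HW_SEND)
		rc_odp_caps |= IB_SUPPORT_SEND;
	if (hw_caps & HW_RECV)
		rc_odp_caps |= IB_SUPPORT_RECV;

	printf("rc_odp_caps 0x%x\n", rc_odp_caps);
	return 0;
}
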
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d35f62d4f4c5..203c8a45e095 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -220,13 +220,11 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
- struct mlx5_general_caps *gen;
int wqe_size;
int wq_size;
- gen = &dev->mdev->caps.gen;
/* Sanity check RQ size before proceeding */
- if (cap->max_recv_wr > gen->max_wqes)
+ if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
return -EINVAL;
if (!has_rq) {
@@ -246,10 +244,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
qp->rq.wqe_cnt = wq_size / wqe_size;
- if (wqe_size > gen->max_rq_desc_sz) {
+ if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
wqe_size,
- gen->max_rq_desc_sz);
+ MLX5_CAP_GEN(dev->mdev,
+ max_wqe_sz_rq));
return -EINVAL;
}
qp->rq.wqe_shift = ilog2(wqe_size);
@@ -330,11 +329,9 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
struct mlx5_ib_qp *qp)
{
- struct mlx5_general_caps *gen;
int wqe_size;
int wq_size;
- gen = &dev->mdev->caps.gen;
if (!attr->cap.max_send_wr)
return 0;
@@ -343,9 +340,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
if (wqe_size < 0)
return wqe_size;
- if (wqe_size > gen->max_sq_desc_sz) {
+ if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
- wqe_size, gen->max_sq_desc_sz);
+ wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
return -EINVAL;
}
@@ -358,9 +355,10 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
- if (qp->sq.wqe_cnt > gen->max_wqes) {
+ if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
- qp->sq.wqe_cnt, gen->max_wqes);
+ qp->sq.wqe_cnt,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
return -ENOMEM;
}
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -375,13 +373,11 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
struct mlx5_ib_create_qp *ucmd)
{
- struct mlx5_general_caps *gen;
int desc_sz = 1 << qp->sq.wqe_shift;
- gen = &dev->mdev->caps.gen;
- if (desc_sz > gen->max_sq_desc_sz) {
+ if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
- desc_sz, gen->max_sq_desc_sz);
+ desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
return -EINVAL;
}
@@ -393,9 +389,10 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
qp->sq.wqe_cnt = ucmd->sq_wqe_count;
- if (qp->sq.wqe_cnt > gen->max_wqes) {
+ if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
- qp->sq.wqe_cnt, gen->max_wqes);
+ qp->sq.wqe_cnt,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
return -EINVAL;
}
@@ -768,7 +765,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
- err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
+ err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
goto err_uuar;
@@ -866,22 +863,21 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
struct mlx5_ib_resources *devr = &dev->devr;
+ struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_create_qp_resp resp;
struct mlx5_create_qp_mbox_in *in;
- struct mlx5_general_caps *gen;
struct mlx5_ib_create_qp ucmd;
int inlen = sizeof(*in);
int err;
mlx5_ib_odp_create_qp(qp);
- gen = &dev->mdev->caps.gen;
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
- if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+ if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
return -EINVAL;
} else {
@@ -914,15 +910,17 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (pd) {
if (pd->uobject) {
+ __u32 max_wqes =
+ 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
mlx5_ib_dbg(dev, "invalid rq params\n");
return -EINVAL;
}
- if (ucmd.sq_wqe_count > gen->max_wqes) {
+ if (ucmd.sq_wqe_count > max_wqes) {
mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
- ucmd.sq_wqe_count, gen->max_wqes);
+ ucmd.sq_wqe_count, max_wqes);
return -EINVAL;
}
err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -1014,7 +1012,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
} else {
in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
- in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
+ in->ctx.rq_type_srqn |=
+ cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
}
}
@@ -1226,7 +1225,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
- struct mlx5_general_caps *gen;
struct mlx5_ib_dev *dev;
struct mlx5_ib_qp *qp;
u16 xrcdn = 0;
@@ -1244,12 +1242,11 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
}
dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
}
- gen = &dev->mdev->caps.gen;
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
case IB_QPT_XRC_INI:
- if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
+ if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
mlx5_ib_dbg(dev, "XRC not supported\n");
return ERR_PTR(-ENOSYS);
}
@@ -1356,9 +1353,6 @@ enum {
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
- struct mlx5_general_caps *gen;
-
- gen = &dev->mdev->caps.gen;
if (rate == IB_RATE_PORT_CURRENT) {
return 0;
} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
@@ -1366,7 +1360,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
} else {
while (rate != IB_RATE_2_5_GBPS &&
!(1 << (rate + MLX5_STAT_RATE_OFFSET) &
- gen->stat_rate_support))
+ MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
--rate;
}
@@ -1377,10 +1371,8 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
struct mlx5_qp_path *path, u8 port, int attr_mask,
u32 path_flags, const struct ib_qp_attr *attr)
{
- struct mlx5_general_caps *gen;
int err;
- gen = &dev->mdev->caps.gen;
path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
@@ -1391,9 +1383,11 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
path->rlid = cpu_to_be16(ah->dlid);
if (ah->ah_flags & IB_AH_GRH) {
- if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
+ if (ah->grh.sgid_index >=
+ dev->mdev->port_caps[port - 1].gid_table_len) {
pr_err("sgid_index (%u) too large. max is %d\n",
- ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
+ ah->grh.sgid_index,
+ dev->mdev->port_caps[port - 1].gid_table_len);
return -EINVAL;
}
path->grh_mlid |= 1 << 7;
@@ -1570,7 +1564,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_qp_context *context;
- struct mlx5_general_caps *gen;
struct mlx5_modify_qp_mbox_in *in;
struct mlx5_ib_pd *pd;
enum mlx5_qp_state mlx5_cur, mlx5_new;
@@ -1579,7 +1572,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
int mlx5_st;
int err;
- gen = &dev->mdev->caps.gen;
in = kzalloc(sizeof(*in), GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1619,7 +1611,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
err = -EINVAL;
goto out;
}
- context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
+ context->mtu_msgmax = (attr->path_mtu << 5) |
+ (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
}
if (attr_mask & IB_QP_DEST_QPN)
@@ -1777,11 +1770,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_ib_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
- struct mlx5_general_caps *gen;
int err = -EINVAL;
int port;
- gen = &dev->mdev->caps.gen;
mutex_lock(&qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -1793,21 +1784,25 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
if ((attr_mask & IB_QP_PORT) &&
- (attr->port_num == 0 || attr->port_num > gen->num_ports))
+ (attr->port_num == 0 ||
+ attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
goto out;
if (attr_mask & IB_QP_PKEY_INDEX) {
port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
+ if (attr->pkey_index >=
+ dev->mdev->port_caps[port - 1].pkey_table_len)
goto out;
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
- attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
+ attr->max_rd_atomic >
+ (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
goto out;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
- attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
+ attr->max_dest_rd_atomic >
+ (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
goto out;
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -3009,7 +3004,7 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
ib_ah_attr->port_num = path->port;
if (ib_ah_attr->port_num == 0 ||
- ib_ah_attr->port_num > dev->caps.gen.num_ports)
+ ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
return;
ib_ah_attr->sl = path->sl & 0xf;
@@ -3135,12 +3130,10 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_general_caps *gen;
struct mlx5_ib_xrcd *xrcd;
int err;
- gen = &dev->mdev->caps.gen;
- if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
+ if (!MLX5_CAP_GEN(dev->mdev, xrc))
return ERR_PTR(-ENOSYS);
xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
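A note on the recurring pattern in this conversion: limits that the
firmware reports as log2 values (log_max_qp_sz, log_max_srq_sz,
log_max_ra_res_qp, ...) are now expanded at each call site with a
shift instead of being cached in the old caps.gen structure. A minimal
sketch of the recurring bounds check, assuming only the MLX5_CAP_GEN()
accessor used in this diff (the helper name sq_size_ok is
hypothetical):

static bool sq_size_ok(struct mlx5_ib_dev *dev, int wqe_cnt)
{
	/* the firmware reports the WQE count limit as a log2 value */
	int max_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);

	return wqe_cnt <= max_wqes;
}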
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 02d77a29764d..e008505e96e9 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -165,7 +165,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
return err;
}
- if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+ if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
mlx5_ib_dbg(dev, "buf alloc failed\n");
err = -ENOMEM;
goto err_db;
@@ -236,7 +236,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_general_caps *gen;
struct mlx5_ib_srq *srq;
int desc_size;
int buf_size;
@@ -245,13 +244,13 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
int uninitialized_var(inlen);
int is_xrc;
u32 flgs, xrcdn;
+ __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
- gen = &dev->mdev->caps.gen;
/* Sanity check SRQ size before proceeding */
- if (init_attr->attr.max_wr >= gen->max_srq_wqes) {
+ if (init_attr->attr.max_wr >= max_srq_wqes) {
mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
init_attr->attr.max_wr,
- gen->max_srq_wqes);
+ max_srq_wqes);
return ERR_PTR(-EINVAL);
}
@@ -303,7 +302,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
in->ctx.db_record = cpu_to_be64(srq->db.dma);
- err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
+ err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
kvfree(in);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 1cc6ca8bfbda..85cfa4f8691f 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -2264,7 +2264,7 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
return -1;
}
card->owner = THIS_MODULE;
- init_timer(&card->listentimer);
+ setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card);
strcpy(card->name, id);
card->contrnr = contr;
card->nbchan = profp->nbchannel;
@@ -2331,8 +2331,6 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
card->cipmask = 0x1FFF03FF; /* any */
card->cipmask2 = 0;
- card->listentimer.data = (unsigned long)card;
- card->listentimer.function = listentimerfunc;
send_listen(card);
mod_timer(&card->listentimer, jiffies + 60 * HZ);
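The timer change above is the usual setup_timer() cleanup:
setup_timer() collapses init_timer() plus the open-coded .function and
.data assignments into one call. A before/after sketch, assuming the
pre-hrtimer-conversion timer API this tree uses:

/* before: three steps, with the callback wired up much later */
init_timer(&card->listentimer);
card->listentimer.data = (unsigned long)card;
card->listentimer.function = listentimerfunc;

/* after: one step at setup time */
setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card);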
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index ead0a4fb7448..a0fdbc074b98 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -267,8 +267,8 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
}
// The descriptor is wrong for some early samples of the ST5481 chip
- altsetting->endpoint[3].desc.wMaxPacketSize = __constant_cpu_to_le16(32);
- altsetting->endpoint[4].desc.wMaxPacketSize = __constant_cpu_to_le16(32);
+ altsetting->endpoint[3].desc.wMaxPacketSize = cpu_to_le16(32);
+ altsetting->endpoint[4].desc.wMaxPacketSize = cpu_to_le16(32);
// Use alternative setting 3 on interface 0 to have 2B+D
if ((status = usb_set_interface(dev, 0, 3)) < 0) {
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 8dc7290089bb..0d29b5a6356d 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -601,14 +601,14 @@ static const struct proto_ops data_sock_ops = {
};
static int
-data_sock_create(struct net *net, struct socket *sock, int protocol)
+data_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
{
struct sock *sk;
if (sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
+ sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
if (!sk)
return -ENOMEM;
@@ -756,14 +756,14 @@ static const struct proto_ops base_sock_ops = {
static int
-base_sock_create(struct net *net, struct socket *sock, int protocol)
+base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
{
struct sock *sk;
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
+ sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
if (!sk)
return -ENOMEM;
@@ -785,7 +785,7 @@ mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern)
switch (proto) {
case ISDN_P_BASE:
- err = base_sock_create(net, sock, proto);
+ err = base_sock_create(net, sock, proto, kern);
break;
case ISDN_P_TE_S0:
case ISDN_P_NT_S0:
@@ -799,7 +799,7 @@ mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern)
case ISDN_P_B_L2DTMF:
case ISDN_P_B_L2DSP:
case ISDN_P_B_L2DSPHDLC:
- err = data_sock_create(net, sock, proto);
+ err = data_sock_create(net, sock, proto, kern);
break;
default:
return err;
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index 433f823037dd..ec1f46a6be3a 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -267,6 +267,10 @@ static void cmodio_pci_remove(struct pci_dev *dev)
static const struct pci_device_id cmodio_pci_ids[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0101 },
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0100 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0201 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0202 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0201 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0202 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, cmodio_pci_ids);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index df51d6025a90..019fceffc9e5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -179,6 +179,20 @@ config VXLAN
To compile this driver as a module, choose M here: the module
will be called vxlan.
+config GENEVE
+ tristate "Generic Network Virtualization Encapsulation netdev"
+ depends on INET && GENEVE_CORE
+ select NET_IP_TUNNEL
+ ---help---
+ This allows one to create geneve virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. GENEVE is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ For more information see:
+ http://tools.ietf.org/html/draft-gross-geneve-02
+
+ To compile this driver as a module, choose M here: the module
+ will be called geneve.
+
config NETCONSOLE
tristate "Network console logging support"
---help---
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e25fdd7d905e..c12cb22478a7 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_VXLAN) += vxlan.o
+obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_NLMON) += nlmon.o
#
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index fbd54f0e32e8..7fde4d5c2b28 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -75,10 +75,10 @@
/* Port Key definitions
* key is determined according to the link speed, duplex and
* user key (which is yet not supported)
- * --------------------------------------------------------------
- * Port key : | User key | Speed | Duplex |
- * --------------------------------------------------------------
- * 16 6 1 0
+ * --------------------------------------------------------------
+ * Port key | User key (10 bits)        | Speed (5 bits) | Duplex |
+ * --------------------------------------------------------------
+ * |15                                6|5              1|0       |
*/
#define AD_DUPLEX_KEY_MASKS 0x1
#define AD_SPEED_KEY_MASKS 0x3E
@@ -1908,8 +1908,14 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
BOND_AD_INFO(bond).aggregator_identifier = 0;
- BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
- BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
/* initialize how many times this module is called in one
* second (should be about every 100ms)
@@ -1945,10 +1951,10 @@ void bond_3ad_bind_slave(struct slave *slave)
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave)->id;
- /* key is determined according to the link speed, duplex and user key(which
- * is yet not supported)
+ /* key is determined according to the link speed, duplex and
+ * user key
*/
- port->actor_admin_port_key = 0;
+ port->actor_admin_port_key = bond->params.ad_user_port_key << 6;
port->actor_admin_port_key |= __get_duplex(port);
port->actor_admin_port_key |= (__get_link_speed(port) << 1);
port->actor_oper_port_key = port->actor_admin_port_key;
@@ -1959,6 +1965,8 @@ void bond_3ad_bind_slave(struct slave *slave)
port->sm_vars &= ~AD_PORT_LACP_ENABLED;
/* actor system is the bond's system */
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
+ port->actor_system_priority =
+ BOND_AD_INFO(bond).system.sys_priority;
/* tx timer(to verify that no more than MAX_TX_IN_SECOND
* lacpdu's are sent in one second)
*/
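With the new layout, bond_3ad_bind_slave() composes the admin port key
exactly as the bit diagram at the top of the file describes: user key
in bits 15:6, speed in bits 5:1, duplex in bit 0. A worked example
with illustrative field values (the speed code here is made up for the
arithmetic, not taken from __get_link_speed()):

/* user key 0x3, speed code 0x04, full duplex */
u16 key = (0x3 << 6) | (0x04 << 1) | 0x1;	/* == 0x00c9 */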
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d5fe5d5f490f..19eb990d398c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,7 +76,7 @@
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
#include <net/switchdev.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
@@ -1015,10 +1015,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t mask;
struct slave *slave;
- /* If any slave has the offload feature flag set,
- * set the offload flag on the bond.
- */
- mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
+ mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
@@ -3054,16 +3051,15 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
int noff, proto = -1;
if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
- return skb_flow_dissect(skb, fk);
+ return skb_flow_dissect_flow_keys(skb, fk);
- fk->ports = 0;
+ fk->ports.ports = 0;
noff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
return false;
iph = ip_hdr(skb);
- fk->src = iph->saddr;
- fk->dst = iph->daddr;
+ iph_to_flow_copy_v4addrs(fk, iph);
noff += iph->ihl << 2;
if (!ip_is_fragment(iph))
proto = iph->protocol;
@@ -3071,15 +3067,14 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
return false;
iph6 = ipv6_hdr(skb);
- fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
- fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+ iph_to_flow_copy_v6addrs(fk, iph6);
noff += sizeof(*iph6);
proto = iph6->nexthdr;
} else {
return false;
}
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
- fk->ports = skb_flow_get_ports(skb, noff, proto);
+ fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
return true;
}
@@ -3105,8 +3100,9 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
hash = bond_eth_hash(skb);
else
- hash = (__force u32)flow.ports;
- hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
+ hash = (__force u32)flow.ports.ports;
+ hash ^= (__force u32)flow_get_u32_dst(&flow) ^
+ (__force u32)flow_get_u32_src(&flow);
hash ^= (hash >> 16);
hash ^= (hash >> 8);
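After the flow_keys conversion, addresses come from the
flow_get_u32_{src,dst}() helpers rather than the removed fk->src and
fk->dst fields, and ports from the nested fk->ports.ports union. A
minimal sketch of the same hash fold as a standalone helper (the
function name l4_fold is hypothetical):

static u32 l4_fold(const struct flow_keys *fk)
{
	u32 hash = (__force u32)fk->ports.ports;

	hash ^= (__force u32)flow_get_u32_dst(fk) ^
		(__force u32)flow_get_u32_src(fk);
	hash ^= (hash >> 16);
	hash ^= (hash >> 8);
	return hash;
}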
@@ -4039,8 +4035,12 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
- .ndo_bridge_setlink = ndo_dflt_netdev_switch_port_bridge_setlink,
- .ndo_bridge_dellink = ndo_dflt_netdev_switch_port_bridge_dellink,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_features_check = passthru_features_check,
};
@@ -4140,6 +4140,8 @@ static int bond_check_params(struct bond_params *params)
struct bond_opt_value newval;
const struct bond_opt_value *valptr;
int arp_all_targets_value;
+ u16 ad_actor_sys_prio = 0;
+ u16 ad_user_port_key = 0;
/* Convert string parameters. */
if (mode) {
@@ -4434,6 +4436,24 @@ static int bond_check_params(struct bond_params *params)
fail_over_mac_value = BOND_FOM_NONE;
}
+ bond_opt_initstr(&newval, "default");
+ valptr = bond_opt_parse(
+ bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
+ &newval);
+ if (!valptr) {
+ pr_err("Error: No ad_actor_sys_prio default value");
+ return -EINVAL;
+ }
+ ad_actor_sys_prio = valptr->value;
+
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
+ &newval);
+ if (!valptr) {
+ pr_err("Error: No ad_user_port_key default value");
+ return -EINVAL;
+ }
+ ad_user_port_key = valptr->value;
+
if (lp_interval == 0) {
pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
@@ -4462,6 +4482,9 @@ static int bond_check_params(struct bond_params *params)
params->lp_interval = lp_interval;
params->packets_per_slave = packets_per_slave;
params->tlb_dynamic_lb = 1; /* Default value */
+ params->ad_actor_sys_prio = ad_actor_sys_prio;
+ eth_zero_addr(params->ad_actor_system);
+ params->ad_user_port_key = ad_user_port_key;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 7b1124366011..f7015eb4f8db 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -94,6 +94,10 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
[IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
[IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
+ [IFLA_BOND_AD_ACTOR_SYS_PRIO] = { .type = NLA_U16 },
+ [IFLA_BOND_AD_USER_PORT_KEY] = { .type = NLA_U16 },
+ [IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
+ .len = ETH_ALEN },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -379,6 +383,36 @@ static int bond_changelink(struct net_device *bond_dev,
if (err)
return err;
}
+ if (data[IFLA_BOND_AD_ACTOR_SYS_PRIO]) {
+ int actor_sys_prio =
+ nla_get_u16(data[IFLA_BOND_AD_ACTOR_SYS_PRIO]);
+
+ bond_opt_initval(&newval, actor_sys_prio);
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_AD_USER_PORT_KEY]) {
+ int port_key =
+ nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
+
+ bond_opt_initval(&newval, port_key);
+ err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) {
+ if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN)
+ return -EINVAL;
+
+ bond_opt_initval(&newval,
+ nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -426,6 +460,9 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
+ nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
0;
}
@@ -551,6 +588,19 @@ static int bond_fill_info(struct sk_buff *skb,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
+ if (nla_put_u16(skb, IFLA_BOND_AD_ACTOR_SYS_PRIO,
+ bond->params.ad_actor_sys_prio))
+ goto nla_put_failure;
+
+ if (nla_put_u16(skb, IFLA_BOND_AD_USER_PORT_KEY,
+ bond->params.ad_user_port_key))
+ goto nla_put_failure;
+
+ if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
+ sizeof(bond->params.ad_actor_system),
+ &bond->params.ad_actor_system))
+ goto nla_put_failure;
+
if (!bond_3ad_get_active_agg_info(bond, &info)) {
struct nlattr *nest;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index e8d3c1d35453..e9c624d54dd4 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -70,6 +70,12 @@ static int bond_option_slaves_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_ad_actor_system_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+static int bond_option_ad_user_port_key_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
@@ -186,6 +192,18 @@ static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
{ NULL, -1, 0}
};
+static const struct bond_opt_value bond_ad_actor_sys_prio_tbl[] = {
+ { "minval", 1, BOND_VALFLAG_MIN},
+ { "maxval", 65535, BOND_VALFLAG_MAX | BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0},
+};
+
+static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
+ { "minval", 0, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
+ { "maxval", 1023, BOND_VALFLAG_MAX},
+ { NULL, -1, 0},
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -379,6 +397,29 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_tlb_dynamic_lb_tbl,
.flags = BOND_OPTFLAG_IFDOWN,
.set = bond_option_tlb_dynamic_lb_set,
+ },
+ [BOND_OPT_AD_ACTOR_SYS_PRIO] = {
+ .id = BOND_OPT_AD_ACTOR_SYS_PRIO,
+ .name = "ad_actor_sys_prio",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .values = bond_ad_actor_sys_prio_tbl,
+ .set = bond_option_ad_actor_sys_prio_set,
+ },
+ [BOND_OPT_AD_ACTOR_SYSTEM] = {
+ .id = BOND_OPT_AD_ACTOR_SYSTEM,
+ .name = "ad_actor_system",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_RAWVAL | BOND_OPTFLAG_IFDOWN,
+ .set = bond_option_ad_actor_system_set,
+ },
+ [BOND_OPT_AD_USER_PORT_KEY] = {
+ .id = BOND_OPT_AD_USER_PORT_KEY,
+ .name = "ad_user_port_key",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .values = bond_ad_user_port_key_tbl,
+ .set = bond_option_ad_user_port_key_set,
}
};
@@ -1349,3 +1390,53 @@ static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
return 0;
}
+
+static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_info(bond->dev, "Setting ad_actor_sys_prio to %llu\n",
+ newval->value);
+
+ bond->params.ad_actor_sys_prio = newval->value;
+ return 0;
+}
+
+static int bond_option_ad_actor_system_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ u8 macaddr[ETH_ALEN];
+ u8 *mac;
+ int i;
+
+ if (newval->string) {
+ i = sscanf(newval->string, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+ &macaddr[0], &macaddr[1], &macaddr[2],
+ &macaddr[3], &macaddr[4], &macaddr[5]);
+ if (i != ETH_ALEN)
+ goto err;
+ mac = macaddr;
+ } else {
+ mac = (u8 *)&newval->value;
+ }
+
+ if (!is_valid_ether_addr(mac))
+ goto err;
+
+ netdev_info(bond->dev, "Setting ad_actor_system to %pM\n", mac);
+ ether_addr_copy(bond->params.ad_actor_system, mac);
+ return 0;
+
+err:
+ netdev_err(bond->dev, "Invalid MAC address.\n");
+ return -EINVAL;
+}
+
+static int bond_option_ad_user_port_key_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_info(bond->dev, "Setting ad_user_port_key to %llu\n",
+ newval->value);
+
+ bond->params.ad_user_port_key = newval->value;
+ return 0;
+}
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index b20b35acb47d..e7f3047a26df 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -135,6 +135,10 @@ static void bond_info_show_master(struct seq_file *seq)
bond->params.ad_select);
seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
optval->string);
+ seq_printf(seq, "System priority: %d\n",
+ BOND_AD_INFO(bond).system.sys_priority);
+ seq_printf(seq, "System MAC address: %pM\n",
+ &BOND_AD_INFO(bond).system.sys_mac_addr);
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
seq_printf(seq, "bond %s has no active aggregator\n",
@@ -198,6 +202,8 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_puts(seq, "details actor lacp pdu:\n");
seq_printf(seq, " system priority: %d\n",
port->actor_system_priority);
+ seq_printf(seq, " system mac address: %pM\n",
+ &port->actor_system);
seq_printf(seq, " port key: %d\n",
port->actor_oper_port_key);
seq_printf(seq, " port priority: %d\n",
@@ -210,6 +216,8 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_puts(seq, "details partner lacp pdu:\n");
seq_printf(seq, " system priority: %d\n",
port->partner_oper.system_priority);
+ seq_printf(seq, " system mac address: %pM\n",
+ &port->partner_oper.system);
seq_printf(seq, " oper key: %d\n",
port->partner_oper.key);
seq_printf(seq, " port priority: %d\n",
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 7e9e151d4d61..143a2abd1c1c 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -692,6 +692,49 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
bonding_show_packets_per_slave, bonding_sysfs_store_option);
+static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ return sprintf(buf, "%hu\n", bond->params.ad_actor_sys_prio);
+
+ return 0;
+}
+static DEVICE_ATTR(ad_actor_sys_prio, S_IRUGO | S_IWUSR,
+ bonding_show_ad_actor_sys_prio, bonding_sysfs_store_option);
+
+static ssize_t bonding_show_ad_actor_system(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ return sprintf(buf, "%pM\n", bond->params.ad_actor_system);
+
+ return 0;
+}
+
+static DEVICE_ATTR(ad_actor_system, S_IRUGO | S_IWUSR,
+ bonding_show_ad_actor_system, bonding_sysfs_store_option);
+
+static ssize_t bonding_show_ad_user_port_key(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ return sprintf(buf, "%hu\n", bond->params.ad_user_port_key);
+
+ return 0;
+}
+static DEVICE_ATTR(ad_user_port_key, S_IRUGO | S_IWUSR,
+ bonding_show_ad_user_port_key, bonding_sysfs_store_option);
+
static struct attribute *per_bond_attrs[] = {
&dev_attr_slaves.attr,
&dev_attr_mode.attr,
@@ -725,6 +768,9 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_lp_interval.attr,
&dev_attr_packets_per_slave.attr,
&dev_attr_tlb_dynamic_lb.attr,
+ &dev_attr_ad_actor_sys_prio.attr,
+ &dev_attr_ad_actor_system.attr,
+ &dev_attr_ad_user_port_key.attr,
NULL,
};
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ad0a7e8c2c2b..6201c5a1a884 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -93,13 +93,13 @@
(FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE)
/* FLEXCAN control register 2 (CTRL2) bits */
-#define FLEXCAN_CRL2_ECRWRE BIT(29)
-#define FLEXCAN_CRL2_WRMFRZ BIT(28)
-#define FLEXCAN_CRL2_RFFN(x) (((x) & 0x0f) << 24)
-#define FLEXCAN_CRL2_TASD(x) (((x) & 0x1f) << 19)
-#define FLEXCAN_CRL2_MRP BIT(18)
-#define FLEXCAN_CRL2_RRS BIT(17)
-#define FLEXCAN_CRL2_EACEN BIT(16)
+#define FLEXCAN_CTRL2_ECRWRE BIT(29)
+#define FLEXCAN_CTRL2_WRMFRZ BIT(28)
+#define FLEXCAN_CTRL2_RFFN(x) (((x) & 0x0f) << 24)
+#define FLEXCAN_CTRL2_TASD(x) (((x) & 0x1f) << 19)
+#define FLEXCAN_CTRL2_MRP BIT(18)
+#define FLEXCAN_CTRL2_RRS BIT(17)
+#define FLEXCAN_CTRL2_EACEN BIT(16)
/* FLEXCAN memory error control register (MECR) bits */
#define FLEXCAN_MECR_ECRWRDIS BIT(31)
@@ -158,7 +158,6 @@
FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID))
/* FLEXCAN message buffers */
-#define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24)
#define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24)
#define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24)
#define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24)
@@ -184,14 +183,14 @@
* FLEXCAN hardware feature flags
*
* Below is some version info we got:
- * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err
- * Filter? connected? detection
- * MX25 FlexCAN2 03.00.00.00 no no no
- * MX28 FlexCAN2 03.00.04.00 yes yes no
- * MX35 FlexCAN2 03.00.00.00 no no no
- * MX53 FlexCAN2 03.00.00.00 yes no no
- * MX6s FlexCAN3 10.00.12.00 yes yes no
- * VF610 FlexCAN3 ? no yes yes
+ * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err RTR re-
+ * Filter? connected? detection ception in MB
+ * MX25 FlexCAN2 03.00.00.00 no no no no
+ * MX28 FlexCAN2 03.00.04.00 yes yes no no
+ * MX35 FlexCAN2 03.00.00.00 no no no no
+ * MX53 FlexCAN2 03.00.00.00 yes no no no
+ * MX6s FlexCAN3 10.00.12.00 yes yes no yes
+ * VF610 FlexCAN3 ? no yes yes yes?
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
@@ -221,7 +220,7 @@ struct flexcan_regs {
u32 imask1; /* 0x28 */
u32 iflag2; /* 0x2c */
u32 iflag1; /* 0x30 */
- u32 crl2; /* 0x34 */
+ u32 ctrl2; /* 0x34 */
u32 esr2; /* 0x38 */
u32 imeur; /* 0x3c */
u32 lrfr; /* 0x40 */
@@ -230,6 +229,16 @@ struct flexcan_regs {
u32 rxfir; /* 0x4c */
u32 _reserved3[12]; /* 0x50 */
struct flexcan_mb cantxfg[64]; /* 0x80 */
+ /* FIFO-mode:
+ * MB
+ * 0x080...0x08f 0 RX message buffer
+ * 0x090...0x0df 1-5 reserved
+ * 0x0e0...0x0ff 6-7 8 entry ID table
+ * (mx25, mx28, mx35, mx53)
+ * 0x0e0...0x2df 6-7..37 8..128 entry ID table
+ * size configured via ctrl2::RFFN
+ * (mx6, vf610)
+ */
u32 _reserved4[408];
u32 mecr; /* 0xae0 */
u32 erriar; /* 0xae4 */
@@ -468,7 +477,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->base;
struct can_frame *cf = (struct can_frame *)skb->data;
u32 can_id;
- u32 ctrl = FLEXCAN_MB_CNT_CODE(0xc) | (cf->can_dlc << 16);
+ u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
@@ -815,7 +824,7 @@ static int flexcan_chip_start(struct net_device *dev)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->base;
- u32 reg_mcr, reg_ctrl, reg_crl2, reg_mecr;
+ u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
int err, i;
/* enable module */
@@ -918,9 +927,9 @@ static int flexcan_chip_start(struct net_device *dev)
* and Correction of Memory Errors" to write to
* MECR register
*/
- reg_crl2 = flexcan_read(&regs->crl2);
- reg_crl2 |= FLEXCAN_CRL2_ECRWRE;
- flexcan_write(reg_crl2, &regs->crl2);
+ reg_ctrl2 = flexcan_read(&regs->ctrl2);
+ reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
+ flexcan_write(reg_ctrl2, &regs->ctrl2);
reg_mecr = flexcan_read(&regs->mecr);
reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 4dd183a3643a..c1e85368a198 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -40,6 +40,7 @@
#define MSYNC_PEER 0x00 /* ICAN only */
#define MSYNC_LOCL 0x01 /* host only */
#define TARGET_RUNNING 0x02
+#define FIRMWARE_STAMP 0x60 /* big endian firmware stamp */
#define MSYNC_RB0 0x01
#define MSYNC_RB1 0x02
@@ -83,6 +84,7 @@
#define MSG_COFFREQ 0x42
#define MSG_CONREQ 0x43
#define MSG_CCONFREQ 0x47
+#define MSG_LMTS 0xb4
/*
* Janz ICAN3 CAN Inquiry Message Types
@@ -165,6 +167,12 @@
/* SJA1000 Clock Input */
#define ICAN3_CAN_CLOCK 8000000
+/* Janz ICAN3 firmware types */
+enum ican3_fwtype {
+ ICAN3_FWTYPE_ICANOS,
+ ICAN3_FWTYPE_CAL_CANOPEN,
+};
+
/* Driver Name */
#define DRV_NAME "janz-ican3"
@@ -215,6 +223,10 @@ struct ican3_dev {
struct completion buserror_comp;
struct can_berr_counter bec;
+ /* firmware type */
+ enum ican3_fwtype fwtype;
+ char fwinfo[32];
+
/* old and new style host interface */
unsigned int iftype;
@@ -750,13 +762,61 @@ static int ican3_set_id_filter(struct ican3_dev *mod, bool accept)
*/
static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
{
+ struct can_bittiming *bt = &mod->can.bittiming;
struct ican3_msg msg;
+ u8 btr0, btr1;
+ int res;
- memset(&msg, 0, sizeof(msg));
- msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
- msg.len = cpu_to_le16(0);
+ /* This algorithm was stolen from drivers/net/can/sja1000/sja1000.c.
+ * The bittiming register command for the ICAN3 just sets the bit
+ * timing registers on the SJA1000 chip directly.
+ */
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
- return ican3_send_msg(mod, &msg);
+ if (mod->fwtype == ICAN3_FWTYPE_ICANOS) {
+ if (on) {
+ /* set bittiming */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CBTRREQ;
+ msg.len = cpu_to_le16(4);
+ msg.data[0] = 0x00;
+ msg.data[1] = 0x00;
+ msg.data[2] = btr0;
+ msg.data[3] = btr1;
+
+ res = ican3_send_msg(mod, &msg);
+ if (res)
+ return res;
+ }
+
+ /* can-on/off request */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
+ msg.len = cpu_to_le16(0);
+
+ return ican3_send_msg(mod, &msg);
+
+ } else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_LMTS;
+ if (on) {
+ msg.len = cpu_to_le16(4);
+ msg.data[0] = 0;
+ msg.data[1] = 0;
+ msg.data[2] = btr0;
+ msg.data[3] = btr1;
+ } else {
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = 1;
+ msg.data[1] = 0;
+ }
+
+ return ican3_send_msg(mod, &msg);
+ }
+ return -ENOTSUPP;
}
static int ican3_set_termination(struct ican3_dev *mod, bool on)
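The BTR0/BTR1 packing above follows the SJA1000 register layout: BTR0
carries BRP-1 in bits 5:0 and SJW-1 in bits 7:6, while BTR1 carries
(PROP_SEG + PHASE_SEG1)-1 in bits 3:0, PHASE_SEG2-1 in bits 6:4, and
the triple-sampling flag in bit 7. A worked example with illustrative
bit-timing values:

/* brp=4, sjw=1, prop_seg=5, phase_seg1=4, phase_seg2=4 */
u8 btr0 = ((4 - 1) & 0x3f) | (((1 - 1) & 0x3) << 6);	/* 0x03 */
u8 btr1 = ((5 + 4 - 1) & 0xf) | (((4 - 1) & 0x7) << 4);	/* 0x38 */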
@@ -1402,7 +1462,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
return 0;
msleep(10);
- } while (time_before(jiffies, start + HZ / 4));
+ } while (time_before(jiffies, start + HZ / 2));
netdev_err(mod->ndev, "failed to reset CAN module\n");
return -ETIMEDOUT;
@@ -1427,6 +1487,17 @@ static int ican3_startup_module(struct ican3_dev *mod)
return ret;
}
+ /* detect firmware */
+ memcpy_fromio(mod->fwinfo, mod->dpm + FIRMWARE_STAMP, sizeof(mod->fwinfo) - 1);
+ if (strncmp(mod->fwinfo, "JANZ-ICAN3", 10)) {
+ netdev_err(mod->ndev, "ICAN3 not detected (found %s)\n", mod->fwinfo);
+ return -ENODEV;
+ }
+ if (strstr(mod->fwinfo, "CAL/CANopen"))
+ mod->fwtype = ICAN3_FWTYPE_CAL_CANOPEN;
+ else
+ mod->fwtype = ICAN3_FWTYPE_ICANOS;
+
/* re-enable interrupts so we can send messages */
iowrite8(1 << mod->num, &mod->ctrl->int_enable);
@@ -1615,36 +1686,6 @@ static const struct can_bittiming_const ican3_bittiming_const = {
.brp_inc = 1,
};
-/*
- * This routine was stolen from drivers/net/can/sja1000/sja1000.c
- *
- * The bittiming register command for the ICAN3 just sets the bit timing
- * registers on the SJA1000 chip directly
- */
-static int ican3_set_bittiming(struct net_device *ndev)
-{
- struct ican3_dev *mod = netdev_priv(ndev);
- struct can_bittiming *bt = &mod->can.bittiming;
- struct ican3_msg msg;
- u8 btr0, btr1;
-
- btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
- btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
- (((bt->phase_seg2 - 1) & 0x7) << 4);
- if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- btr1 |= 0x80;
-
- memset(&msg, 0, sizeof(msg));
- msg.spec = MSG_CBTRREQ;
- msg.len = cpu_to_le16(4);
- msg.data[0] = 0x00;
- msg.data[1] = 0x00;
- msg.data[2] = btr0;
- msg.data[3] = btr1;
-
- return ican3_send_msg(mod, &msg);
-}
-
static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
{
struct ican3_dev *mod = netdev_priv(ndev);
@@ -1730,11 +1771,22 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
return count;
}
+static ssize_t ican3_sysfs_show_fwinfo(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
+}
+
static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
ican3_sysfs_set_term);
+static DEVICE_ATTR(fwinfo, S_IRUSR | S_IRUGO, ican3_sysfs_show_fwinfo, NULL);
static struct attribute *ican3_sysfs_attrs[] = {
&dev_attr_termination.attr,
+ &dev_attr_fwinfo.attr,
NULL,
};
@@ -1794,7 +1846,6 @@ static int ican3_probe(struct platform_device *pdev)
mod->can.clock.freq = ICAN3_CAN_CLOCK;
mod->can.bittiming_const = &ican3_bittiming_const;
- mod->can.do_set_bittiming = ican3_set_bittiming;
mod->can.do_set_mode = ican3_set_mode;
mod->can.do_get_berr_counter = ican3_get_berr_counter;
mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES
@@ -1866,7 +1917,7 @@ static int ican3_probe(struct platform_device *pdev)
goto out_free_irq;
}
- dev_info(dev, "module %d: registered CAN device\n", pdata->modno);
+ netdev_info(mod->ndev, "module %d: registered CAN device\n", pdata->modno);
return 0;
out_free_irq:
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index bf63fee4e743..c1a95a34d62e 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -190,10 +190,11 @@
#define RXBEID0_OFF 4
#define RXBDLC_OFF 5
#define RXBDAT_OFF 6
-#define RXFSIDH(n) ((n) * 4)
-#define RXFSIDL(n) ((n) * 4 + 1)
-#define RXFEID8(n) ((n) * 4 + 2)
-#define RXFEID0(n) ((n) * 4 + 3)
+#define RXFSID(n) ((n < 3) ? 0 : 4)
+#define RXFSIDH(n) ((n) * 4 + RXFSID(n))
+#define RXFSIDL(n) ((n) * 4 + 1 + RXFSID(n))
+#define RXFEID8(n) ((n) * 4 + 2 + RXFSID(n))
+#define RXFEID0(n) ((n) * 4 + 3 + RXFSID(n))
#define RXMSIDH(n) ((n) * 4 + 0x20)
#define RXMSIDL(n) ((n) * 4 + 0x21)
#define RXMEID8(n) ((n) * 4 + 0x22)
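The new RXFSID(n) term accounts for a gap in the MCP251x register map:
acceptance filters 0-2 start at address 0x00, but filters 3-5 start at
0x10, so an extra offset of 4 must be added for n >= 3. A couple of
worked addresses under the corrected macros:

/* filters 0-2 sit below the gap, filters 3-5 above it */
BUILD_BUG_ON(RXFSIDH(2) != 0x08);	/* unchanged */
BUILD_BUG_ON(RXFSIDH(3) != 0x10);	/* the old macro wrongly gave 0x0c */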
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 18550c7ebe6f..7ad0a4d8e475 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -37,22 +37,22 @@ config NET_DSA_MV88E6123_61_65
ethernet switch chips.
config NET_DSA_MV88E6171
- tristate "Marvell 88E6171/6172 ethernet switch chip support"
+ tristate "Marvell 88E6171/6175/6350/6351 ethernet switch chip support"
depends on NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
- This enables support for the Marvell 88E6171/6172 ethernet switch
- chips.
+ This enables support for the Marvell 88E6171/6175/6350/6351
+ ethernet switch chips.
config NET_DSA_MV88E6352
- tristate "Marvell 88E6176/88E6352 ethernet switch chip support"
+ tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support"
depends on NET_DSA
select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
- This enables support for the Marvell 88E6176 and 88E6352 ethernet
- switch chips.
+ This enables support for the Marvell 88E6172, 88E6176 and 88E6352
+ ethernet switch chips.
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index cedb572bf25a..972982f8bea7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -24,6 +24,7 @@
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
+#include <linux/brcmphy.h>
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
@@ -697,7 +698,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
/* Include the pseudo-PHY address and the broadcast PHY address to
* divert reads towards our workaround
*/
- ds->phys_mii_mask |= ((1 << 30) | (1 << 0));
+ ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
rev = reg_readl(priv, REG_SWITCH_REVISION);
priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
@@ -782,7 +783,7 @@ static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
*/
switch (addr) {
case 0:
- case 30:
+ case BRCM_PSEUDO_PHY_ADDR:
return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
default:
return 0xffff;
@@ -797,7 +798,7 @@ static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
*/
switch (addr) {
case 0:
- case 30:
+ case BRCM_PSEUDO_PHY_ADDR:
bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
break;
}
@@ -911,6 +912,13 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
*/
if (port == 7) {
status->link = priv->port_sts[port].link;
+ /* For MoCA interfaces, also force a link down notification
+ * since some versions of the user-space daemon (mocad) use
+ * cmd->autoneg to force the link, which messes up the PHY
+ * state machine and makes it go into the PHY_FORCING state instead.
+ */
+ if (!status->link)
+ netif_carrier_off(ds->ports[port]);
status->duplex = 1;
} else {
status->link = 1;
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index b4af6d5aff7c..71a29a7ce538 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -54,192 +54,40 @@ static char *mv88e6123_61_65_probe(struct device *host_dev, int sw_addr)
static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
{
+ u32 upstream_port = dsa_upstream_port(ds);
int ret;
- int i;
+ u32 reg;
+
+ ret = mv88e6xxx_setup_global(ds);
+ if (ret)
+ return ret;
/* Disable the PHY polling unit (since there won't be any
* external PHYs to poll), don't discard packets with
* excessive collisions, and mask all interrupt sources.
*/
- REG_WRITE(REG_GLOBAL, 0x04, 0x0000);
-
- /* Set the default address aging time to 5 minutes, and
- * enable address learn messages to be sent to all message
- * ports.
- */
- REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
- /* Configure the priority mapping registers. */
- ret = mv88e6xxx_config_prio(ds);
- if (ret < 0)
- return ret;
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, 0x0000);
/* Configure the upstream port, and configure the upstream
* port as the port to which ingress and egress monitor frames
* are to be sent.
*/
- REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+ reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+ REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
/* Disable remote management for now, and set the switch's
* DSA device number.
*/
- REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:2x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:0x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
- /* Disable the loopback filter, disable flow control
- * messages, disable flood broadcast override, disable
- * removing of provider tags, disable ATU age violation
- * interrupts, disable tag flow control, force flow
- * control priority to the highest, and send all special
- * multicast frames to the CPU at the highest priority.
- */
- REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
- /* Program the DSA routing table. */
- for (i = 0; i < 32; i++) {
- int nexthop;
-
- nexthop = 0x1f;
- if (i != ds->index && i < ds->dst->pd->nr_chips)
- nexthop = ds->pd->rtable[i] & 0x1f;
-
- REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
- }
-
- /* Clear all trunk masks. */
- for (i = 0; i < 8; i++)
- REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
-
- /* Clear all trunk mappings. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
- /* Disable ingress rate limiting by resetting all ingress
- * rate limit registers to their initial state.
- */
- for (i = 0; i < 6; i++)
- REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
- /* Initialise cross-chip port VLAN table to reset defaults. */
- REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
- /* Clear the priority override table. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
- /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
return 0;
}
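The named shifts make the old magic constant legible: multiplying the
upstream port number by 0x1110 replicated it into bits 12, 8 and 4 of
GLOBAL_MONITOR_CONTROL, i.e. the ingress, egress and ARP destination
nibbles. A worked equivalence for upstream port 5:

u32 reg = 5 << 12 | 5 << 8 | 5 << 4;	/* == 5 * 0x1110 == 0x5550 */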
-static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
-{
- int addr = REG_PORT(p);
- u16 val;
-
- /* MAC Forcing register: don't force link, speed, duplex
- * or flow control state to any particular values on physical
- * ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * full duplex.
- */
- if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- REG_WRITE(addr, 0x01, 0x003e);
- else
- REG_WRITE(addr, 0x01, 0x0003);
-
- /* Do not limit the period of time that this port can be
- * paused for by the remote end or the period of time that
- * this port can pause the remote end.
- */
- REG_WRITE(addr, 0x02, 0x0000);
-
- /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
- * disable Header mode, enable IGMP/MLD snooping, disable VLAN
- * tunneling, determine priority by looking at 802.1p and IP
- * priority fields (IP prio has precedence), and set STP state
- * to Forwarding.
- *
- * If this is the CPU link, use DSA or EDSA tagging depending
- * on which tagging mode was configured.
- *
- * If this is a link to another switch, use DSA tagging mode.
- *
- * If this is the upstream port for this switch, enable
- * forwarding of unknown unicasts and multicasts.
- */
- val = 0x0433;
- if (dsa_is_cpu_port(ds, p)) {
- if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
- val |= 0x3300;
- else
- val |= 0x0100;
- }
- if (ds->dsa_port_mask & (1 << p))
- val |= 0x0100;
- if (p == dsa_upstream_port(ds))
- val |= 0x000c;
- REG_WRITE(addr, 0x04, val);
-
- /* Port Control 2: don't force a good FCS, set the maximum
- * frame size to 10240 bytes, don't let the switch add or
- * strip 802.1q tags, don't discard tagged or untagged frames
- * on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't
- * send a copy of all transmitted/received frames on this port
- * to the CPU.
- */
- REG_WRITE(addr, 0x08, 0x2080);
-
- /* Egress rate control: disable egress rate control. */
- REG_WRITE(addr, 0x09, 0x0001);
-
- /* Egress rate control 2: disable egress rate control. */
- REG_WRITE(addr, 0x0a, 0x0000);
-
- /* Port Association Vector: when learning source addresses
- * of packets, add the address to the address database using
- * a port bitmap that has only the bit for this port set and
- * the other bits clear.
- */
- REG_WRITE(addr, 0x0b, 1 << p);
-
- /* Port ATU control: disable limiting the number of address
- * database entries that this port is allowed to use.
- */
- REG_WRITE(addr, 0x0c, 0x0000);
-
- /* Priority Override: disable DA, SA and VTU priority override. */
- REG_WRITE(addr, 0x0d, 0x0000);
-
- /* Port Ethertype: use the Ethertype DSA Ethertype value. */
- REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
- /* Tag Remap: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x18, 0x3210);
-
- /* Tag Remap 2: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x19, 0x7654);
-
- return mv88e6xxx_setup_port_common(ds, p);
-}
-
static int mv88e6123_61_65_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int i;
int ret;
ret = mv88e6xxx_setup_common(ds);
@@ -262,19 +110,11 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- /* @@@ initialise vtu and atu */
-
ret = mv88e6123_61_65_setup_global(ds);
if (ret < 0)
return ret;
- for (i = 0; i < ps->num_ports; i++) {
- ret = mv88e6123_61_65_setup_port(ds, i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
+ return mv88e6xxx_setup_ports(ds);
}
struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index e54824fa0d95..32f4a08e9bc9 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -37,6 +37,8 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
return "Marvell 88E6131 (B2)";
if (ret_masked == PORT_SWITCH_ID_6131)
return "Marvell 88E6131";
+ if (ret_masked == PORT_SWITCH_ID_6185)
+ return "Marvell 88E6185";
}
return NULL;
@@ -44,186 +46,62 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
static int mv88e6131_setup_global(struct dsa_switch *ds)
{
+ u32 upstream_port = dsa_upstream_port(ds);
int ret;
- int i;
+ u32 reg;
+
+ ret = mv88e6xxx_setup_global(ds);
+ if (ret)
+ return ret;
/* Enable the PHY polling unit, don't discard packets with
* excessive collisions, use a weighted fair queueing scheme
* to arbitrate between packet queues, set the maximum frame
* size to 1632, and mask all interrupt sources.
*/
- REG_WRITE(REG_GLOBAL, 0x04, 0x4400);
-
- /* Set the default address aging time to 5 minutes, and
- * enable address learn messages to be sent to all message
- * ports.
- */
- REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
- /* Configure the priority mapping registers. */
- ret = mv88e6xxx_config_prio(ds);
- if (ret < 0)
- return ret;
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+ GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_MAX_FRAME_1632);
/* Set the VLAN ethertype to 0x8100. */
- REG_WRITE(REG_GLOBAL, 0x19, 0x8100);
+ REG_WRITE(REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100);
/* Disable ARP mirroring, and configure the upstream port as
* the port to which ingress and egress monitor frames are to
* be sent.
*/
- REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);
+ reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+ GLOBAL_MONITOR_CONTROL_ARP_DISABLED;
+ REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
/* Disable cascade port functionality unless this device
* is used in a cascade configuration, and set the switch's
* DSA device number.
*/
if (ds->dst->pd->nr_chips > 1)
- REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f));
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
+ GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+ (ds->index & 0x1f));
else
- REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:0x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
- /* Ignore removed tag data on doubly tagged packets, disable
- * flow control messages, force flow control priority to the
- * highest, and send all special multicast frames to the CPU
- * port at the highest priority.
- */
- REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
- /* Program the DSA routing table. */
- for (i = 0; i < 32; i++) {
- int nexthop;
-
- nexthop = 0x1f;
- if (ds->pd->rtable &&
- i != ds->index && i < ds->dst->pd->nr_chips)
- nexthop = ds->pd->rtable[i] & 0x1f;
-
- REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
- }
-
- /* Clear all trunk masks. */
- for (i = 0; i < 8; i++)
- REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff);
-
- /* Clear all trunk mappings. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
+ GLOBAL_CONTROL_2_NO_CASCADE |
+ (ds->index & 0x1f));
/* Force the priority of IGMP/MLD snoop frames and ARP frames
* to the highest setting.
*/
- REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff);
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
+ GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP |
+ 7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT |
+ GLOBAL2_PRIO_OVERRIDE_FORCE_ARP |
+ 7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT);
return 0;
}
-static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = REG_PORT(p);
- u16 val;
-
- /* MAC Forcing register: don't force link, speed, duplex
- * or flow control state to any particular values on physical
- * ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * (100 Mb/s on 6085) full duplex.
- */
- if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- if (ps->id == PORT_SWITCH_ID_6085)
- REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
- else
- REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
- else
- REG_WRITE(addr, 0x01, 0x0003);
-
- /* Port Control: disable Core Tag, disable Drop-on-Lock,
- * transmit frames unmodified, disable Header mode,
- * enable IGMP/MLD snoop, disable DoubleTag, disable VLAN
- * tunneling, determine priority by looking at 802.1p and
- * IP priority fields (IP prio has precedence), and set STP
- * state to Forwarding.
- *
- * If this is the upstream port for this switch, enable
- * forwarding of unknown unicasts, and enable DSA tagging
- * mode.
- *
- * If this is the link to another switch, use DSA tagging
- * mode, but do not enable forwarding of unknown unicasts.
- */
- val = 0x0433;
- if (p == dsa_upstream_port(ds)) {
- val |= 0x0104;
- /* On 6085, unknown multicast forward is controlled
- * here rather than in Port Control 2 register.
- */
- if (ps->id == PORT_SWITCH_ID_6085)
- val |= 0x0008;
- }
- if (ds->dsa_port_mask & (1 << p))
- val |= 0x0100;
- REG_WRITE(addr, 0x04, val);
-
- /* Port Control 2: don't force a good FCS, don't use
- * VLAN-based, source address-based or destination
- * address-based priority overrides, don't let the switch
- * add or strip 802.1q tags, don't discard tagged or
- * untagged frames on this port, do a destination address
- * lookup on received packets as usual, don't send a copy
- * of all transmitted/received frames on this port to the
- * CPU, and configure the upstream port number.
- *
- * If this is the upstream port for this switch, enable
- * forwarding of unknown multicast addresses.
- */
- if (ps->id == PORT_SWITCH_ID_6085)
- /* on 6085, bits 3:0 are reserved, bit 6 control ARP
- * mirroring, and multicast forward is handled in
- * Port Control register.
- */
- REG_WRITE(addr, 0x08, 0x0080);
- else {
- val = 0x0080 | dsa_upstream_port(ds);
- if (p == dsa_upstream_port(ds))
- val |= 0x0040;
- REG_WRITE(addr, 0x08, val);
- }
-
- /* Rate Control: disable ingress rate limiting. */
- REG_WRITE(addr, 0x09, 0x0000);
-
- /* Rate Control 2: disable egress rate limiting. */
- REG_WRITE(addr, 0x0a, 0x0000);
-
- /* Port Association Vector: when learning source addresses
- * of packets, add the address to the address database using
- * a port bitmap that has only the bit for this port set and
- * the other bits clear.
- */
- REG_WRITE(addr, 0x0b, 1 << p);
-
- /* Tag Remap: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x18, 0x3210);
-
- /* Tag Remap 2: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x19, 0x7654);
-
- return mv88e6xxx_setup_port_common(ds, p);
-}
-
static int mv88e6131_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int i;
int ret;
ret = mv88e6xxx_setup_common(ds);
@@ -234,6 +112,7 @@ static int mv88e6131_setup(struct dsa_switch *ds)
switch (ps->id) {
case PORT_SWITCH_ID_6085:
+ case PORT_SWITCH_ID_6185:
ps->num_ports = 10;
break;
case PORT_SWITCH_ID_6095:
@@ -251,19 +130,11 @@ static int mv88e6131_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- /* @@@ initialise vtu and atu */
-
ret = mv88e6131_setup_global(ds);
if (ret < 0)
return ret;
- for (i = 0; i < ps->num_ports; i++) {
- ret = mv88e6131_setup_port(ds, i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
+ return mv88e6xxx_setup_ports(ds);
}
static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 9104efea0e3e..1c7808495a9d 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -1,4 +1,4 @@
-/* net/dsa/mv88e6171.c - Marvell 88e6171/8826172 switch chip support
+/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
* Copyright (c) 2008-2009 Marvell Semiconductor
* Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
*
@@ -29,8 +29,12 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
if (ret >= 0) {
if ((ret & 0xfff0) == PORT_SWITCH_ID_6171)
return "Marvell 88E6171";
- if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
- return "Marvell 88E6172";
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6175)
+ return "Marvell 88E6175";
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6350)
+ return "Marvell 88E6350";
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6351)
+ return "Marvell 88E6351";
}
return NULL;
@@ -38,196 +42,41 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
static int mv88e6171_setup_global(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u32 upstream_port = dsa_upstream_port(ds);
int ret;
- int i;
+ u32 reg;
+
+ ret = mv88e6xxx_setup_global(ds);
+ if (ret)
+ return ret;
/* Discard packets with excessive collisions, mask all
* interrupt sources, enable PPU.
*/
- REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
-
- /* Set the default address aging time to 5 minutes, and
- * enable address learn messages to be sent to all message
- * ports.
- */
- REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
- /* Configure the priority mapping registers. */
- ret = mv88e6xxx_config_prio(ds);
- if (ret < 0)
- return ret;
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+ GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
/* Configure the upstream port, and configure the upstream
* port as the port to which ingress and egress monitor frames
* are to be sent.
*/
- if (REG_READ(REG_PORT(0), 0x03) == 0x1710)
- REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1111));
- else
- REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+ reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT;
+ REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
/* Disable remote management for now, and set the switch's
* DSA device number.
*/
- REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:2x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:0x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
- /* Disable the loopback filter, disable flow control
- * messages, disable flood broadcast override, disable
- * removing of provider tags, disable ATU age violation
- * interrupts, disable tag flow control, force flow
- * control priority to the highest, and send all special
- * multicast frames to the CPU at the highest priority.
- */
- REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
- /* Program the DSA routing table. */
- for (i = 0; i < 32; i++) {
- int nexthop;
-
- nexthop = 0x1f;
- if (i != ds->index && i < ds->dst->pd->nr_chips)
- nexthop = ds->pd->rtable[i] & 0x1f;
-
- REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
- }
-
- /* Clear all trunk masks. */
- for (i = 0; i < ps->num_ports; i++)
- REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
-
- /* Clear all trunk mappings. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
- /* Disable ingress rate limiting by resetting all ingress
- * rate limit registers to their initial state.
- */
- for (i = 0; i < 6; i++)
- REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
- /* Initialise cross-chip port VLAN table to reset defaults. */
- REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
- /* Clear the priority override table. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
- /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
return 0;
}
-static int mv88e6171_setup_port(struct dsa_switch *ds, int p)
-{
- int addr = REG_PORT(p);
- u16 val;
-
- /* MAC Forcing register: don't force link, speed, duplex
- * or flow control state to any particular values on physical
- * ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * full duplex.
- */
- val = REG_READ(addr, 0x01);
- if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- REG_WRITE(addr, 0x01, val | 0x003e);
- else
- REG_WRITE(addr, 0x01, val | 0x0003);
-
- /* Do not limit the period of time that this port can be
- * paused for by the remote end or the period of time that
- * this port can pause the remote end.
- */
- REG_WRITE(addr, 0x02, 0x0000);
-
- /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
- * disable Header mode, enable IGMP/MLD snooping, disable VLAN
- * tunneling, determine priority by looking at 802.1p and IP
- * priority fields (IP prio has precedence), and set STP state
- * to Forwarding.
- *
- * If this is the CPU link, use DSA or EDSA tagging depending
- * on which tagging mode was configured.
- *
- * If this is a link to another switch, use DSA tagging mode.
- *
- * If this is the upstream port for this switch, enable
- * forwarding of unknown unicasts and multicasts.
- */
- val = 0x0433;
- if (dsa_is_cpu_port(ds, p)) {
- if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
- val |= 0x3300;
- else
- val |= 0x0100;
- }
- if (ds->dsa_port_mask & (1 << p))
- val |= 0x0100;
- if (p == dsa_upstream_port(ds))
- val |= 0x000c;
- REG_WRITE(addr, 0x04, val);
-
- /* Port Control 2: don't force a good FCS, set the maximum
- * frame size to 10240 bytes, don't let the switch add or
- * strip 802.1q tags, don't discard tagged or untagged frames
- * on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't
- * send a copy of all transmitted/received frames on this port
- * to the CPU.
- */
- REG_WRITE(addr, 0x08, 0x2080);
-
- /* Egress rate control: disable egress rate control. */
- REG_WRITE(addr, 0x09, 0x0001);
-
- /* Egress rate control 2: disable egress rate control. */
- REG_WRITE(addr, 0x0a, 0x0000);
-
- /* Port Association Vector: when learning source addresses
- * of packets, add the address to the address database using
- * a port bitmap that has only the bit for this port set and
- * the other bits clear.
- */
- REG_WRITE(addr, 0x0b, 1 << p);
-
- /* Port ATU control: disable limiting the number of address
- * database entries that this port is allowed to use.
- */
- REG_WRITE(addr, 0x0c, 0x0000);
-
- /* Priority Override: disable DA, SA and VTU priority override. */
- REG_WRITE(addr, 0x0d, 0x0000);
-
- /* Port Ethertype: use the Ethertype DSA Ethertype value. */
- REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
- /* Tag Remap: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x18, 0x3210);
-
- /* Tag Remap 2: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x19, 0x7654);
-
- return mv88e6xxx_setup_port_common(ds, p);
-}
-
static int mv88e6171_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int i;
int ret;
ret = mv88e6xxx_setup_common(ds);
@@ -240,44 +89,11 @@ static int mv88e6171_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- /* @@@ initialise vtu and atu */
-
ret = mv88e6171_setup_global(ds);
if (ret < 0)
return ret;
- for (i = 0; i < ps->num_ports; i++) {
- if (!(dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i)))
- continue;
-
- ret = mv88e6171_setup_port(ds, i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int mv88e6171_get_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- if (ps->id == PORT_SWITCH_ID_6172)
- return mv88e6xxx_get_eee(ds, port, e);
-
- return -EOPNOTSUPP;
-}
-
-static int mv88e6171_set_eee(struct dsa_switch *ds, int port,
- struct phy_device *phydev, struct ethtool_eee *e)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- if (ps->id == PORT_SWITCH_ID_6172)
- return mv88e6xxx_set_eee(ds, port, phydev, e);
-
- return -EOPNOTSUPP;
+ return mv88e6xxx_setup_ports(ds);
}
struct dsa_switch_driver mv88e6171_switch_driver = {
@@ -292,8 +108,6 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_sset_count = mv88e6xxx_get_sset_count,
- .set_eee = mv88e6171_set_eee,
- .get_eee = mv88e6171_get_eee,
#ifdef CONFIG_NET_DSA_HWMON
.get_temp = mv88e6xxx_get_temp,
#endif
@@ -308,4 +122,6 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
};
MODULE_ALIAS("platform:mv88e6171");
-MODULE_ALIAS("platform:mv88e6172");
+MODULE_ALIAS("platform:mv88e6175");
+MODULE_ALIAS("platform:mv88e6350");
+MODULE_ALIAS("platform:mv88e6351");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 126c11b81e75..632815c10a40 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -32,6 +32,8 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
if (ret >= 0) {
+ if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
+ return "Marvell 88E6172";
if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
return "Marvell 88E6176";
if (ret == PORT_SWITCH_ID_6352_A0)
@@ -47,187 +49,37 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
static int mv88e6352_setup_global(struct dsa_switch *ds)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u32 upstream_port = dsa_upstream_port(ds);
int ret;
- int i;
+ u32 reg;
+
+ ret = mv88e6xxx_setup_global(ds);
+ if (ret)
+ return ret;
/* Discard packets with excessive collisions,
* mask all interrupt sources, enable PPU (bit 14, undocumented).
*/
- REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
-
- /* Set the default address aging time to 5 minutes, and
- * enable address learn messages to be sent to all message
- * ports.
- */
- REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
- /* Configure the priority mapping registers. */
- ret = mv88e6xxx_config_prio(ds);
- if (ret < 0)
- return ret;
+ REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+ GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
/* Configure the upstream port, and configure the upstream
* port as the port to which ingress and egress monitor frames
* are to be sent.
*/
- REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+ reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+ REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
/* Disable remote management for now, and set the switch's
* DSA device number.
*/
REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:2x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
- /* Send all frames with destination addresses matching
- * 01:80:c2:00:00:0x to the CPU port.
- */
- REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
- /* Disable the loopback filter, disable flow control
- * messages, disable flood broadcast override, disable
- * removing of provider tags, disable ATU age violation
- * interrupts, disable tag flow control, force flow
- * control priority to the highest, and send all special
- * multicast frames to the CPU at the highest priority.
- */
- REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
- /* Program the DSA routing table. */
- for (i = 0; i < 32; i++) {
- int nexthop = 0x1f;
-
- if (i != ds->index && i < ds->dst->pd->nr_chips)
- nexthop = ds->pd->rtable[i] & 0x1f;
-
- REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
- }
-
- /* Clear all trunk masks. */
- for (i = 0; i < 8; i++)
- REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7f);
-
- /* Clear all trunk mappings. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
- /* Disable ingress rate limiting by resetting all ingress
- * rate limit registers to their initial state.
- */
- for (i = 0; i < ps->num_ports; i++)
- REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
- /* Initialise cross-chip port VLAN table to reset defaults. */
- REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
- /* Clear the priority override table. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
- /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
-
return 0;
}
-static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
-{
- int addr = REG_PORT(p);
- u16 val;
-
- /* MAC Forcing register: don't force link, speed, duplex
- * or flow control state to any particular values on physical
- * ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * full duplex.
- */
- if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- REG_WRITE(addr, 0x01, 0x003e);
- else
- REG_WRITE(addr, 0x01, 0x0003);
-
- /* Do not limit the period of time that this port can be
- * paused for by the remote end or the period of time that
- * this port can pause the remote end.
- */
- REG_WRITE(addr, 0x02, 0x0000);
-
- /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
- * disable Header mode, enable IGMP/MLD snooping, disable VLAN
- * tunneling, determine priority by looking at 802.1p and IP
- * priority fields (IP prio has precedence), and set STP state
- * to Forwarding.
- *
- * If this is the CPU link, use DSA or EDSA tagging depending
- * on which tagging mode was configured.
- *
- * If this is a link to another switch, use DSA tagging mode.
- *
- * If this is the upstream port for this switch, enable
- * forwarding of unknown unicasts and multicasts.
- */
- val = 0x0433;
- if (dsa_is_cpu_port(ds, p)) {
- if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
- val |= 0x3300;
- else
- val |= 0x0100;
- }
- if (ds->dsa_port_mask & (1 << p))
- val |= 0x0100;
- if (p == dsa_upstream_port(ds))
- val |= 0x000c;
- REG_WRITE(addr, 0x04, val);
-
- /* Port Control 2: don't force a good FCS, set the maximum
- * frame size to 10240 bytes, don't let the switch add or
- * strip 802.1q tags, don't discard tagged or untagged frames
- * on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't
- * send a copy of all transmitted/received frames on this port
- * to the CPU.
- */
- REG_WRITE(addr, 0x08, 0x2080);
-
- /* Egress rate control: disable egress rate control. */
- REG_WRITE(addr, 0x09, 0x0001);
-
- /* Egress rate control 2: disable egress rate control. */
- REG_WRITE(addr, 0x0a, 0x0000);
-
- /* Port Association Vector: when learning source addresses
- * of packets, add the address to the address database using
- * a port bitmap that has only the bit for this port set and
- * the other bits clear.
- */
- REG_WRITE(addr, 0x0b, 1 << p);
-
- /* Port ATU control: disable limiting the number of address
- * database entries that this port is allowed to use.
- */
- REG_WRITE(addr, 0x0c, 0x0000);
-
- /* Priority Override: disable DA, SA and VTU priority override. */
- REG_WRITE(addr, 0x0d, 0x0000);
-
- /* Port Ethertype: use the Ethertype DSA Ethertype value. */
- REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
- /* Tag Remap: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x18, 0x3210);
-
- /* Tag Remap 2: use an identity 802.1p prio -> switch prio
- * mapping.
- */
- REG_WRITE(addr, 0x19, 0x7654);
-
- return mv88e6xxx_setup_port_common(ds, p);
-}
-
#ifdef CONFIG_NET_DSA_HWMON
static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
@@ -292,7 +144,6 @@ static int mv88e6352_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
- int i;
ret = mv88e6xxx_setup_common(ds);
if (ret < 0)
@@ -306,19 +157,11 @@ static int mv88e6352_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- /* @@@ initialise vtu and atu */
-
ret = mv88e6352_setup_global(ds);
if (ret < 0)
return ret;
- for (i = 0; i < ps->num_ports; i++) {
- ret = mv88e6352_setup_port(ds, i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
+ return mv88e6xxx_setup_ports(ds);
}
static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
@@ -552,3 +395,4 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
};
MODULE_ALIAS("platform:mv88e6352");
+MODULE_ALIAS("platform:mv88e6172");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index cf309aa92802..39530fa142b0 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -19,6 +19,34 @@
#include <net/dsa.h>
#include "mv88e6xxx.h"
+/* MDIO bus access can be nested in the case of PHYs connected to the
+ * internal MDIO bus of the switch, which is accessed via the MDIO bus
+ * of the Ethernet interface. Avoid lockdep false positives by using
+ * mutex_lock_nested().
+ */
+static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
+{
+ int ret;
+
+ mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ ret = bus->read(bus, addr, regnum);
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
+ u16 val)
+{
+ int ret;
+
+ mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ ret = bus->write(bus, addr, regnum, val);
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
* use all 32 SMI bus addresses on its SMI bus, and all switch registers
* will be directly accessible on some {device address,register address}
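
When the switch's internal PHYs sit behind the master interface's MDIO bus, a PHY access re-enters the MDIO layer while a mutex of the same lock class (bus->mdio_lock of the outer bus) is already held, and lockdep reports a self-deadlock even though the two mutexes are distinct instances. mutex_lock_nested() with SINGLE_DEPTH_NESTING marks the inner acquisition as a deliberate second level. A rough user-space analogue of the nesting, with the MDIO types stubbed (this illustrates the lock shape only, not the kernel API):

#include <pthread.h>
#include <stdio.h>

struct mii_bus { pthread_mutex_t mdio_lock; };	/* stub */

static int bus_read(struct mii_bus *bus, int addr, int regnum)
{
	int ret;

	pthread_mutex_lock(&bus->mdio_lock);	/* kernel equivalent:
						 * mutex_lock_nested(&bus->mdio_lock,
						 * SINGLE_DEPTH_NESTING) */
	ret = addr + regnum;			/* stand-in for bus->read() */
	pthread_mutex_unlock(&bus->mdio_lock);
	return ret;
}

int main(void)
{
	struct mii_bus outer = { PTHREAD_MUTEX_INITIALIZER };
	struct mii_bus inner = { PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&outer.mdio_lock);	/* master bus lock held */
	printf("%d\n", bus_read(&inner, 1, 2));	/* nested access: a different
						 * mutex instance, same class */
	pthread_mutex_unlock(&outer.mdio_lock);
	return 0;
}
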
@@ -33,7 +61,7 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
int i;
for (i = 0; i < 16; i++) {
- ret = mdiobus_read(bus, sw_addr, SMI_CMD);
+ ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
if (ret < 0)
return ret;
@@ -49,7 +77,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
int ret;
if (sw_addr == 0)
- return mdiobus_read(bus, addr, reg);
+ return mv88e6xxx_mdiobus_read(bus, addr, reg);
/* Wait for the bus to become free. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -57,8 +85,8 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
return ret;
/* Transmit the read command. */
- ret = mdiobus_write(bus, sw_addr, SMI_CMD,
- SMI_CMD_OP_22_READ | (addr << 5) | reg);
+ ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
+ SMI_CMD_OP_22_READ | (addr << 5) | reg);
if (ret < 0)
return ret;
@@ -68,7 +96,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
return ret;
/* Read the data. */
- ret = mdiobus_read(bus, sw_addr, SMI_DATA);
+ ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
if (ret < 0)
return ret;
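
For switches not strapped to address zero, every register access is indirect: wait for the busy bit in SMI_CMD to clear, issue a command word encoding the opcode, device address and register, wait again, then move the payload through SMI_DATA. A standalone sketch of the command-word packing (the SMI_CMD_OP_22_READ encoding is written out from assumed header values; treat the constants as illustrative):

#include <stdio.h>

#define SMI_CMD_BUSY		(1 << 15)
#define SMI_CMD_CLAUSE_22	(1 << 12)	/* assumed encoding */
#define SMI_CMD_OP_22_READ	((2 << 10) | SMI_CMD_CLAUSE_22 | SMI_CMD_BUSY)

int main(void)
{
	int addr = 0x10, reg = 0x03;	/* e.g. port 0, PORT_SWITCH_ID */

	printf("SMI_CMD <= 0x%04x\n",
	       SMI_CMD_OP_22_READ | (addr << 5) | reg);
	return 0;
}
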
@@ -112,7 +140,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
int ret;
if (sw_addr == 0)
- return mdiobus_write(bus, addr, reg, val);
+ return mv88e6xxx_mdiobus_write(bus, addr, reg, val);
/* Wait for the bus to become free. */
ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -120,13 +148,13 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
return ret;
/* Transmit the data to write. */
- ret = mdiobus_write(bus, sw_addr, SMI_DATA, val);
+ ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
if (ret < 0)
return ret;
/* Transmit the write command. */
- ret = mdiobus_write(bus, sw_addr, SMI_CMD,
- SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
+ ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
+ SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
if (ret < 0)
return ret;
@@ -165,24 +193,6 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
return ret;
}
-int mv88e6xxx_config_prio(struct dsa_switch *ds)
-{
- /* Configure the IP ToS mapping registers. */
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
-
- /* Configure the IEEE 802.1p priority mapping register. */
- REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
-
- return 0;
-}
-
int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
@@ -217,20 +227,20 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
return 0;
}
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
if (addr >= 0)
- return mv88e6xxx_reg_read(ds, addr, regnum);
+ return _mv88e6xxx_reg_read(ds, addr, regnum);
return 0xffff;
}
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
u16 val)
{
if (addr >= 0)
- return mv88e6xxx_reg_write(ds, addr, regnum, val);
+ return _mv88e6xxx_reg_write(ds, addr, regnum, val);
return 0;
}
@@ -434,26 +444,113 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
}
}
+static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6031:
+ case PORT_SWITCH_ID_6061:
+ case PORT_SWITCH_ID_6035:
+ case PORT_SWITCH_ID_6065:
+ return true;
+ }
+ return false;
+}
+
+static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6092:
+ case PORT_SWITCH_ID_6095:
+ return true;
+ }
+ return false;
+}
+
+static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6046:
+ case PORT_SWITCH_ID_6085:
+ case PORT_SWITCH_ID_6096:
+ case PORT_SWITCH_ID_6097:
+ return true;
+ }
+ return false;
+}
+
+static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6123:
+ case PORT_SWITCH_ID_6161:
+ case PORT_SWITCH_ID_6165:
+ return true;
+ }
+ return false;
+}
+
+static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6121:
+ case PORT_SWITCH_ID_6122:
+ case PORT_SWITCH_ID_6152:
+ case PORT_SWITCH_ID_6155:
+ case PORT_SWITCH_ID_6182:
+ case PORT_SWITCH_ID_6185:
+ case PORT_SWITCH_ID_6108:
+ case PORT_SWITCH_ID_6131:
+ return true;
+ }
+ return false;
+}
+
+static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ switch (ps->id) {
+ case PORT_SWITCH_ID_6171:
+ case PORT_SWITCH_ID_6175:
+ case PORT_SWITCH_ID_6350:
+ case PORT_SWITCH_ID_6351:
+ return true;
+ }
+ return false;
+}
+
static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
switch (ps->id) {
- case PORT_SWITCH_ID_6352:
case PORT_SWITCH_ID_6172:
case PORT_SWITCH_ID_6176:
+ case PORT_SWITCH_ID_6240:
+ case PORT_SWITCH_ID_6352:
return true;
}
return false;
}
-static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
{
int ret;
int i;
for (i = 0; i < 10; i++) {
- ret = REG_READ(REG_GLOBAL, GLOBAL_STATS_OP);
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
return 0;
}
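
Each mv88e6xxx_6xxx_family() helper classifies the product ID into a family so that later code can gate per-family register programming. The same mapping can be read as an ID-to-family table; a standalone sketch, trimmed to a few entries for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum family { F6095, F6185, F6351, F6352 };

struct entry { int id; enum family fam; };

static const struct entry map[] = {
	{ 0x0950, F6095 },	/* 88E6095 */
	{ 0x1a70, F6185 },	/* 88E6185 */
	{ 0x1710, F6351 },	/* 88E6171 is 6351-family */
	{ 0x3520, F6352 },	/* 88E6352 */
};

static bool is_family(int id, enum family fam)
{
	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].id == id)
			return map[i].fam == fam;
	return false;
}

int main(void)
{
	printf("%d\n", is_family(0x1710, F6351));	/* prints 1 */
	return 0;
}
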
@@ -461,7 +558,8 @@ static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
return -ETIMEDOUT;
}
-static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
int ret;
@@ -469,42 +567,45 @@ static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
- REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_CAPTURE_PORT |
- GLOBAL_STATS_OP_HIST_RX_TX | port);
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_CAPTURE_PORT |
+ GLOBAL_STATS_OP_HIST_RX_TX | port);
+ if (ret < 0)
+ return ret;
/* Wait for the snapshotting to complete. */
- ret = mv88e6xxx_stats_wait(ds);
+ ret = _mv88e6xxx_stats_wait(ds);
if (ret < 0)
return ret;
return 0;
}
-static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
+/* Must be called with SMI mutex held */
+static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
{
u32 _val;
int ret;
*val = 0;
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
- GLOBAL_STATS_OP_READ_CAPTURED |
- GLOBAL_STATS_OP_HIST_RX_TX | stat);
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_READ_CAPTURED |
+ GLOBAL_STATS_OP_HIST_RX_TX | stat);
if (ret < 0)
return;
- ret = mv88e6xxx_stats_wait(ds);
+ ret = _mv88e6xxx_stats_wait(ds);
if (ret < 0)
return;
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
if (ret < 0)
return;
_val = ret << 16;
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
+ ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
if (ret < 0)
return;
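
The statistics unit returns captured counters through 16-bit data registers; _mv88e6xxx_stats_read() reassembles a 32-bit counter as (high << 16) | low, and the ethtool path below widens a pair of 32-bit halves the same way for 64-bit statistics. A standalone sketch with hard-coded register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t hi16 = 0x0001, lo16 = 0x86a0;
	uint32_t counter32 = ((uint32_t)hi16 << 16) | lo16;	/* 100000 */

	uint32_t hi32 = 0x00000002, lo32 = 0x540be400;
	uint64_t counter64 = ((uint64_t)hi32 << 32) | lo32;	/* 1e10 */

	printf("%u %llu\n", counter32, (unsigned long long)counter64);
	return 0;
}
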
@@ -587,11 +688,11 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
int ret;
int i;
- mutex_lock(&ps->stats_mutex);
+ mutex_lock(&ps->smi_mutex);
- ret = mv88e6xxx_stats_snapshot(ds, port);
+ ret = _mv88e6xxx_stats_snapshot(ds, port);
if (ret < 0) {
- mutex_unlock(&ps->stats_mutex);
+ mutex_unlock(&ps->smi_mutex);
return;
}
@@ -602,14 +703,14 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
u32 high = 0;
if (s->reg >= 0x100) {
- ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
- s->reg - 0x100);
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+ s->reg - 0x100);
if (ret < 0)
goto error;
low = ret;
if (s->sizeof_stat == 4) {
- ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
- s->reg - 0x100 + 1);
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+ s->reg - 0x100 + 1);
if (ret < 0)
goto error;
high = ret;
@@ -617,14 +718,14 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
data[i] = (((u64)high) << 16) | low;
continue;
}
- mv88e6xxx_stats_read(ds, s->reg, &low);
+ _mv88e6xxx_stats_read(ds, s->reg, &low);
if (s->sizeof_stat == 8)
- mv88e6xxx_stats_read(ds, s->reg + 1, &high);
+ _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
data[i] = (((u64)high) << 32) | low;
}
error:
- mutex_unlock(&ps->stats_mutex);
+ mutex_unlock(&ps->smi_mutex);
}
/* All the statistics in the table */
@@ -694,7 +795,7 @@ int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
*temp = 0;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
if (ret < 0)
@@ -727,19 +828,23 @@ int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
error:
_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
#endif /* CONFIG_NET_DSA_HWMON */
-static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+/* Must be called with SMI lock held */
+static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
+ u16 mask)
{
unsigned long timeout = jiffies + HZ / 10;
while (time_before(jiffies, timeout)) {
int ret;
- ret = REG_READ(reg, offset);
+ ret = _mv88e6xxx_reg_read(ds, reg, offset);
+ if (ret < 0)
+ return ret;
if (!(ret & mask))
return 0;
@@ -748,10 +853,22 @@ static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
return -ETIMEDOUT;
}
-int mv88e6xxx_phy_wait(struct dsa_switch *ds)
+static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = _mv88e6xxx_wait(ds, reg, offset, mask);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
{
- return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
- GLOBAL2_SMI_OP_BUSY);
+ return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ GLOBAL2_SMI_OP_BUSY);
}
int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
@@ -767,56 +884,46 @@ int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
}
/* Must be called with SMI lock held */
-static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
-{
- unsigned long timeout = jiffies + HZ / 10;
-
- while (time_before(jiffies, timeout)) {
- int ret;
-
- ret = _mv88e6xxx_reg_read(ds, reg, offset);
- if (ret < 0)
- return ret;
- if (!(ret & mask))
- return 0;
-
- usleep_range(1000, 2000);
- }
- return -ETIMEDOUT;
-}
-
-/* Must be called with SMI lock held */
static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
{
return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
GLOBAL_ATU_OP_BUSY);
}
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
int regnum)
{
int ret;
- REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
- GLOBAL2_SMI_OP_22_READ | (addr << 5) | regnum);
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ GLOBAL2_SMI_OP_22_READ | (addr << 5) |
+ regnum);
+ if (ret < 0)
+ return ret;
- ret = mv88e6xxx_phy_wait(ds);
+ ret = _mv88e6xxx_phy_wait(ds);
if (ret < 0)
return ret;
- return REG_READ(REG_GLOBAL2, GLOBAL2_SMI_DATA);
+ return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
}
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
int regnum, u16 val)
{
- REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
- REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
- GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | regnum);
+ int ret;
- return mv88e6xxx_phy_wait(ds);
+ ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+ if (ret < 0)
+ return ret;
+
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+				   GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
+				   regnum);
+	if (ret < 0)
+		return ret;
+
+	return _mv88e6xxx_phy_wait(ds);
}
int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
@@ -824,7 +931,7 @@ int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int reg;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
if (reg < 0)
@@ -833,7 +940,7 @@ int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
e->eee_enabled = !!(reg & 0x0200);
e->tx_lpi_enabled = !!(reg & 0x0100);
- reg = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+ reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
if (reg < 0)
goto out;
@@ -841,7 +948,7 @@ int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
reg = 0;
out:
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return reg;
}
@@ -852,7 +959,7 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
int reg;
int ret;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
if (ret < 0)
@@ -866,7 +973,7 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
out:
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1241,13 +1348,212 @@ static void mv88e6xxx_bridge_work(struct work_struct *work)
}
}
-int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port)
+static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret, fid;
+ u16 reg;
mutex_lock(&ps->smi_mutex);
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+ mv88e6xxx_6065_family(ds)) {
+ /* MAC Forcing register: don't force link, speed,
+ * duplex or flow control state to any particular
+ * values on physical ports, but force the CPU port
+ * and all DSA ports to their maximum bandwidth and
+ * full duplex.
+ */
+		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+		if (ret < 0)
+			goto abort;
+		reg = ret;
+ if (dsa_is_cpu_port(ds, port) ||
+ ds->dsa_port_mask & (1 << port)) {
+ reg |= PORT_PCS_CTRL_FORCE_LINK |
+ PORT_PCS_CTRL_LINK_UP |
+ PORT_PCS_CTRL_DUPLEX_FULL |
+ PORT_PCS_CTRL_FORCE_DUPLEX;
+ if (mv88e6xxx_6065_family(ds))
+ reg |= PORT_PCS_CTRL_100;
+ else
+ reg |= PORT_PCS_CTRL_1000;
+ } else {
+ reg |= PORT_PCS_CTRL_UNFORCED;
+ }
+
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_PCS_CTRL, reg);
+ if (ret)
+ goto abort;
+ }
+
+ /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+ * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and IP
+ * priority fields (IP prio has precedence), and set STP state
+ * to Forwarding.
+ *
+ * If this is the CPU link, use DSA or EDSA tagging depending
+ * on which tagging mode was configured.
+ *
+ * If this is a link to another switch, use DSA tagging mode.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts and multicasts.
+ */
+ reg = 0;
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
+ mv88e6xxx_6185_family(ds))
+ reg = PORT_CONTROL_IGMP_MLD_SNOOP |
+ PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
+ PORT_CONTROL_STATE_FORWARDING;
+ if (dsa_is_cpu_port(ds, port)) {
+ if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+ reg |= PORT_CONTROL_DSA_TAG;
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+ reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
+ else
+ reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ }
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
+ mv88e6xxx_6185_family(ds)) {
+ if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+ reg |= PORT_CONTROL_EGRESS_ADD_TAG;
+ }
+ }
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) {
+ if (ds->dsa_port_mask & (1 << port))
+ reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ if (port == dsa_upstream_port(ds))
+ reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+ PORT_CONTROL_FORWARD_UNKNOWN_MC;
+ }
+ if (reg) {
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_CONTROL, reg);
+ if (ret)
+ goto abort;
+ }
+
+ /* Port Control 2: don't force a good FCS, set the maximum
+ * frame size to 10240 bytes, don't let the switch add or
+ * strip 802.1q tags, don't discard tagged or untagged frames
+ * on this port, do a destination address lookup on all
+ * received packets as usual, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
+ */
+ reg = 0;
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6095_family(ds))
+ reg = PORT_CONTROL_2_MAP_DA;
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds))
+ reg |= PORT_CONTROL_2_JUMBO_10240;
+
+ if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
+ /* Set the upstream port this port should use */
+ reg |= dsa_upstream_port(ds);
+		/* Enable forwarding of unknown multicast addresses to
+ * the upstream port
+ */
+ if (port == dsa_upstream_port(ds))
+ reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
+ }
+
+ if (reg) {
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_CONTROL_2, reg);
+ if (ret)
+ goto abort;
+ }
+
+ /* Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
+ 1 << port);
+ if (ret)
+ goto abort;
+
+ /* Egress rate control 2: disable egress rate control. */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
+ 0x0000);
+ if (ret)
+ goto abort;
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ /* Do not limit the period of time that this port can
+ * be paused for by the remote end or the period of
+ * time that this port can pause the remote end.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_PAUSE_CTRL, 0x0000);
+ if (ret)
+ goto abort;
+
+ /* Port ATU control: disable limiting the number of
+ * address database entries that this port is allowed
+ * to use.
+ */
+		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+					   PORT_ATU_CONTROL, 0x0000);
+		if (ret)
+			goto abort;
+
+		/* Priority Override: disable DA, SA and VTU priority
+ * override.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_PRI_OVERRIDE, 0x0000);
+ if (ret)
+ goto abort;
+
+ /* Port Ethertype: use the Ethertype DSA Ethertype
+ * value.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_ETH_TYPE, ETH_P_EDSA);
+ if (ret)
+			goto abort;
+
+		/* Tag Remap: use an identity 802.1p prio -> switch
+ * prio mapping.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_TAG_REGMAP_0123, 0x3210);
+ if (ret)
+ goto abort;
+
+ /* Tag Remap 2: use an identity 802.1p prio -> switch
+ * prio mapping.
+ */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_TAG_REGMAP_4567, 0x7654);
+ if (ret)
+ goto abort;
+ }
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+ /* Rate Control: disable ingress rate limiting. */
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_RATE_CONTROL, 0x0001);
+ if (ret)
+ goto abort;
+ }
+
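
For CPU and DSA ports the PCS control word forces link up, full duplex and the maximum speed; ORing the named bits reproduces the 0x003e literal that the per-chip setup_port functions used to write. A standalone check:

#include <stdio.h>

#define PORT_PCS_CTRL_LINK_UP		(1 << 5)
#define PORT_PCS_CTRL_FORCE_LINK	(1 << 4)
#define PORT_PCS_CTRL_DUPLEX_FULL	(1 << 3)
#define PORT_PCS_CTRL_FORCE_DUPLEX	(1 << 2)
#define PORT_PCS_CTRL_1000		0x02

int main(void)
{
	unsigned int reg = PORT_PCS_CTRL_FORCE_LINK |
			   PORT_PCS_CTRL_LINK_UP |
			   PORT_PCS_CTRL_DUPLEX_FULL |
			   PORT_PCS_CTRL_FORCE_DUPLEX |
			   PORT_PCS_CTRL_1000;

	printf("0x%04x\n", reg);	/* 0x003e, the old magic literal */
	return 0;
}
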
/* Port Control 1: disable trunking, disable sending
* learning messages to this port.
*/
@@ -1281,13 +1587,25 @@ abort:
return ret;
}
+int mv88e6xxx_setup_ports(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+ int i;
+
+ for (i = 0; i < ps->num_ports; i++) {
+ ret = mv88e6xxx_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
int mv88e6xxx_setup_common(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
mutex_init(&ps->smi_mutex);
- mutex_init(&ps->stats_mutex);
- mutex_init(&ps->phy_mutex);
ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
@@ -1298,6 +1616,104 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
return 0;
}
+int mv88e6xxx_setup_global(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int i;
+
+ /* Set the default address aging time to 5 minutes, and
+ * enable address learn messages to be sent to all message
+ * ports.
+ */
+ REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
+
+ /* Configure the IP ToS mapping registers. */
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+ REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+
+ /* Configure the IEEE 802.1p priority mapping register. */
+ REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+
+ /* Send all frames with destination addresses matching
+ * 01:80:c2:00:00:0x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+
+ /* Ignore removed tag data on doubly tagged packets, disable
+ * flow control messages, force flow control priority to the
+ * highest, and send all special multicast frames to the CPU
+ * port at the highest priority.
+ */
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
+ 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
+ GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
+
+ /* Program the DSA routing table. */
+ for (i = 0; i < 32; i++) {
+ int nexthop = 0x1f;
+
+ if (ds->pd->rtable &&
+ i != ds->index && i < ds->dst->pd->nr_chips)
+ nexthop = ds->pd->rtable[i] & 0x1f;
+
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
+ GLOBAL2_DEVICE_MAPPING_UPDATE |
+ (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
+ nexthop);
+ }
+
+ /* Clear all trunk masks. */
+ for (i = 0; i < 8; i++)
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
+ 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
+ ((1 << ps->num_ports) - 1));
+
+ /* Clear all trunk mappings. */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
+ GLOBAL2_TRUNK_MAPPING_UPDATE |
+ (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+ /* Send all frames with destination addresses matching
+ * 01:80:c2:00:00:2x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
+
+ /* Initialise cross-chip port VLAN table to reset
+ * defaults.
+ */
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
+
+ /* Clear the priority override table. */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
+ 0x8000 | (i << 8));
+ }
+
+ if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+ mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+ mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+ /* Disable ingress rate limiting by resetting all
+ * ingress rate limit registers to their initial
+ * state.
+ */
+ for (i = 0; i < ps->num_ports; i++)
+ REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
+ 0x9000 | (i << 8));
+ }
+
+ return 0;
+}
+
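
The DSA routing table maps each possible target device number (0-31) to the local port that reaches it, with 0x1f meaning "no route"; entries are written through GLOBAL2_DEVICE_MAPPING with the update bit set. A standalone sketch with an assumed two-chip topology (the rtable contents are illustrative):

#include <stdio.h>

#define GLOBAL2_DEVICE_MAPPING_UPDATE		(1 << 15)
#define GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT	8

int main(void)
{
	int my_index = 0, nr_chips = 2;
	int rtable[2] = { 0, 6 };	/* assumed: chip 1 reached via port 6 */

	for (int i = 0; i < 32; i++) {
		int nexthop = 0x1f;	/* unreachable */

		if (i != my_index && i < nr_chips)
			nexthop = rtable[i] & 0x1f;

		printf("GLOBAL2_DEVICE_MAPPING <= 0x%04x\n",
		       GLOBAL2_DEVICE_MAPPING_UPDATE |
		       (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
		       nexthop);
	}
	return 0;
}
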
int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -1343,14 +1759,14 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
if (ret < 0)
goto error;
ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
error:
_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1360,7 +1776,7 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
if (ret < 0)
goto error;
@@ -1368,7 +1784,7 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
error:
_mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1391,9 +1807,9 @@ mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
if (addr < 0)
return addr;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_read(ds, addr, regnum);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1407,9 +1823,9 @@ mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
if (addr < 0)
return addr;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1423,9 +1839,9 @@ mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
if (addr < 0)
return addr;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -1440,9 +1856,9 @@ mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
if (addr < 0)
return addr;
- mutex_lock(&ps->phy_mutex);
+ mutex_lock(&ps->smi_mutex);
ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
- mutex_unlock(&ps->phy_mutex);
+ mutex_unlock(&ps->smi_mutex);
return ret;
}
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index e045154f3364..e10ccdb4ffbc 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -40,9 +40,31 @@
#define PORT_STATUS_TX_PAUSED BIT(5)
#define PORT_STATUS_FLOW_CTRL BIT(4)
#define PORT_PCS_CTRL 0x01
+#define PORT_PCS_CTRL_FC BIT(7)
+#define PORT_PCS_CTRL_FORCE_FC BIT(6)
+#define PORT_PCS_CTRL_LINK_UP BIT(5)
+#define PORT_PCS_CTRL_FORCE_LINK BIT(4)
+#define PORT_PCS_CTRL_DUPLEX_FULL BIT(3)
+#define PORT_PCS_CTRL_FORCE_DUPLEX BIT(2)
+#define PORT_PCS_CTRL_10 0x00
+#define PORT_PCS_CTRL_100 0x01
+#define PORT_PCS_CTRL_1000 0x02
+#define PORT_PCS_CTRL_UNFORCED 0x03
+#define PORT_PAUSE_CTRL 0x02
#define PORT_SWITCH_ID 0x03
+#define PORT_SWITCH_ID_6031 0x0310
+#define PORT_SWITCH_ID_6035 0x0350
+#define PORT_SWITCH_ID_6046 0x0480
+#define PORT_SWITCH_ID_6061 0x0610
+#define PORT_SWITCH_ID_6065 0x0650
#define PORT_SWITCH_ID_6085 0x04a0
+#define PORT_SWITCH_ID_6092 0x0970
#define PORT_SWITCH_ID_6095 0x0950
+#define PORT_SWITCH_ID_6096 0x0980
+#define PORT_SWITCH_ID_6097 0x0990
+#define PORT_SWITCH_ID_6108 0x1070
+#define PORT_SWITCH_ID_6121 0x1040
+#define PORT_SWITCH_ID_6122 0x1050
#define PORT_SWITCH_ID_6123 0x1210
#define PORT_SWITCH_ID_6123_A1 0x1212
#define PORT_SWITCH_ID_6123_A2 0x1213
@@ -58,13 +80,38 @@
#define PORT_SWITCH_ID_6165_A2 0x1653
#define PORT_SWITCH_ID_6171 0x1710
#define PORT_SWITCH_ID_6172 0x1720
+#define PORT_SWITCH_ID_6175 0x1750
#define PORT_SWITCH_ID_6176 0x1760
#define PORT_SWITCH_ID_6182 0x1a60
#define PORT_SWITCH_ID_6185 0x1a70
+#define PORT_SWITCH_ID_6240 0x2400
+#define PORT_SWITCH_ID_6320 0x1250
+#define PORT_SWITCH_ID_6350 0x3710
+#define PORT_SWITCH_ID_6351 0x3750
#define PORT_SWITCH_ID_6352 0x3520
#define PORT_SWITCH_ID_6352_A0 0x3521
#define PORT_SWITCH_ID_6352_A1 0x3522
#define PORT_CONTROL 0x04
+#define PORT_CONTROL_USE_CORE_TAG BIT(15)
+#define PORT_CONTROL_DROP_ON_LOCK BIT(14)
+#define PORT_CONTROL_EGRESS_UNMODIFIED (0x0 << 12)
+#define PORT_CONTROL_EGRESS_UNTAGGED (0x1 << 12)
+#define PORT_CONTROL_EGRESS_TAGGED (0x2 << 12)
+#define PORT_CONTROL_EGRESS_ADD_TAG (0x3 << 12)
+#define PORT_CONTROL_HEADER BIT(11)
+#define PORT_CONTROL_IGMP_MLD_SNOOP BIT(10)
+#define PORT_CONTROL_DOUBLE_TAG BIT(9)
+#define PORT_CONTROL_FRAME_MODE_NORMAL (0x0 << 8)
+#define PORT_CONTROL_FRAME_MODE_DSA (0x1 << 8)
+#define PORT_CONTROL_FRAME_MODE_PROVIDER (0x2 << 8)
+#define PORT_CONTROL_FRAME_ETHER_TYPE_DSA (0x3 << 8)
+#define PORT_CONTROL_DSA_TAG BIT(8)
+#define PORT_CONTROL_VLAN_TUNNEL BIT(7)
+#define PORT_CONTROL_TAG_IF_BOTH BIT(6)
+#define PORT_CONTROL_USE_IP BIT(5)
+#define PORT_CONTROL_USE_TAG BIT(4)
+#define PORT_CONTROL_FORWARD_UNKNOWN_MC BIT(3)
+#define PORT_CONTROL_FORWARD_UNKNOWN BIT(2)
#define PORT_CONTROL_STATE_MASK 0x03
#define PORT_CONTROL_STATE_DISABLED 0x00
#define PORT_CONTROL_STATE_BLOCKING 0x01
@@ -74,15 +121,32 @@
#define PORT_BASE_VLAN 0x06
#define PORT_DEFAULT_VLAN 0x07
#define PORT_CONTROL_2 0x08
+#define PORT_CONTROL_2_IGNORE_FCS BIT(15)
+#define PORT_CONTROL_2_VTU_PRI_OVERRIDE BIT(14)
+#define PORT_CONTROL_2_SA_PRIO_OVERRIDE BIT(13)
+#define PORT_CONTROL_2_DA_PRIO_OVERRIDE BIT(12)
+#define PORT_CONTROL_2_JUMBO_1522 (0x00 << 12)
+#define PORT_CONTROL_2_JUMBO_2048 (0x01 << 12)
+#define PORT_CONTROL_2_JUMBO_10240 (0x02 << 12)
+#define PORT_CONTROL_2_DISCARD_TAGGED BIT(9)
+#define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8)
+#define PORT_CONTROL_2_MAP_DA BIT(7)
+#define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6)
+#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6)
+#define PORT_CONTROL_2_EGRESS_MONITOR BIT(5)
+#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4)
#define PORT_RATE_CONTROL 0x09
#define PORT_RATE_CONTROL_2 0x0a
#define PORT_ASSOC_VECTOR 0x0b
+#define PORT_ATU_CONTROL 0x0c
+#define PORT_PRI_OVERRIDE 0x0d
+#define PORT_ETH_TYPE 0x0f
#define PORT_IN_DISCARD_LO 0x10
#define PORT_IN_DISCARD_HI 0x11
#define PORT_IN_FILTERED 0x12
#define PORT_OUT_FILTERED 0x13
-#define PORT_TAG_REGMAP_0123 0x19
-#define PORT_TAG_REGMAP_4567 0x1a
+#define PORT_TAG_REGMAP_0123 0x18
+#define PORT_TAG_REGMAP_4567 0x19
#define REG_GLOBAL 0x1b
#define GLOBAL_STATUS 0x00
@@ -102,7 +166,7 @@
#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13) /* 6352 */
#define GLOBAL_CONTROL_SCHED_PRIO BIT(11) /* 6152 */
#define GLOBAL_CONTROL_MAX_FRAME_1632 BIT(10) /* 6152 */
-#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) /* 6152 */
+#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) /* 6152 */
#define GLOBAL_CONTROL_DEVICE_EN BIT(7)
#define GLOBAL_CONTROL_STATS_DONE_EN BIT(6)
#define GLOBAL_CONTROL_VTU_PROBLEM_EN BIT(5)
@@ -117,6 +181,7 @@
#define GLOBAL_VTU_DATA_4_7 0x08
#define GLOBAL_VTU_DATA_8_11 0x09
#define GLOBAL_ATU_CONTROL 0x0a
+#define GLOBAL_ATU_CONTROL_LEARN2ALL BIT(3)
#define GLOBAL_ATU_OP 0x0b
#define GLOBAL_ATU_OP_BUSY BIT(15)
#define GLOBAL_ATU_OP_NOP (0 << 12)
@@ -151,7 +216,15 @@
#define GLOBAL_IEEE_PRI 0x18
#define GLOBAL_CORE_TAG_TYPE 0x19
#define GLOBAL_MONITOR_CONTROL 0x1a
+#define GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT 12
+#define GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT 8
+#define GLOBAL_MONITOR_CONTROL_ARP_SHIFT 4
+#define GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT 0
+#define GLOBAL_MONITOR_CONTROL_ARP_DISABLED (0xf0)
#define GLOBAL_CONTROL_2 0x1c
+#define GLOBAL_CONTROL_2_NO_CASCADE 0xe000
+#define GLOBAL_CONTROL_2_MULTIPLE_CASCADE 0xf000
+
#define GLOBAL_STATS_OP 0x1d
#define GLOBAL_STATS_OP_BUSY BIT(15)
#define GLOBAL_STATS_OP_NOP (0 << 12)
@@ -172,9 +245,20 @@
#define GLOBAL2_MGMT_EN_0X 0x03
#define GLOBAL2_FLOW_CONTROL 0x04
#define GLOBAL2_SWITCH_MGMT 0x05
+#define GLOBAL2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA BIT(15)
+#define GLOBAL2_SWITCH_MGMT_PREVENT_LOOPS BIT(14)
+#define GLOBAL2_SWITCH_MGMT_FLOW_CONTROL_MSG BIT(13)
+#define GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI BIT(7)
+#define GLOBAL2_SWITCH_MGMT_RSVD2CPU BIT(3)
#define GLOBAL2_DEVICE_MAPPING 0x06
+#define GLOBAL2_DEVICE_MAPPING_UPDATE BIT(15)
+#define GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT 8
#define GLOBAL2_TRUNK_MASK 0x07
+#define GLOBAL2_TRUNK_MASK_UPDATE BIT(15)
+#define GLOBAL2_TRUNK_MASK_NUM_SHIFT 12
#define GLOBAL2_TRUNK_MAPPING 0x08
+#define GLOBAL2_TRUNK_MAPPING_UPDATE BIT(15)
+#define GLOBAL2_TRUNK_MAPPING_ID_SHIFT 11
#define GLOBAL2_INGRESS_OP 0x09
#define GLOBAL2_INGRESS_DATA 0x0a
#define GLOBAL2_PVT_ADDR 0x0b
@@ -183,6 +267,10 @@
#define GLOBAL2_SWITCH_MAC_BUSY BIT(15)
#define GLOBAL2_ATU_STATS 0x0e
#define GLOBAL2_PRIO_OVERRIDE 0x0f
+#define GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP BIT(7)
+#define GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT 4
+#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP BIT(3)
+#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT 0
#define GLOBAL2_EEPROM_OP 0x14
#define GLOBAL2_EEPROM_OP_BUSY BIT(15)
#define GLOBAL2_EEPROM_OP_LOAD BIT(11)
@@ -260,14 +348,14 @@ struct mv88e6xxx_hw_stat {
};
int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
-int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port);
+int mv88e6xxx_setup_ports(struct dsa_switch *ds);
int mv88e6xxx_setup_common(struct dsa_switch *ds);
+int mv88e6xxx_setup_global(struct dsa_switch *ds);
int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
int reg, u16 val);
int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
-int mv88e6xxx_config_prio(struct dsa_switch *ds);
int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
@@ -289,7 +377,6 @@ int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *_p);
int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_phy_wait(struct dsa_switch *ds);
int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
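
Two of the new PORT_CONTROL definitions intentionally alias: on the 6095/6185 families bit 8 alone is the DSA-tag enable, while the newer families interpret bits 9:8 as a two-bit frame-mode field, so PORT_CONTROL_DSA_TAG and PORT_CONTROL_FRAME_MODE_DSA encode the same value and mv88e6xxx_setup_port() picks the spelling matching the family. A standalone check:

#include <stdio.h>

#define PORT_CONTROL_FRAME_MODE_DSA		(0x1 << 8)
#define PORT_CONTROL_FRAME_ETHER_TYPE_DSA	(0x3 << 8)
#define PORT_CONTROL_DSA_TAG			(1 << 8)

int main(void)
{
	/* bit 8: single DSA-tag enable on 6095/6185; low bit of the
	 * two-bit frame-mode field on 6097/6165/6351/6352 */
	printf("%d\n", PORT_CONTROL_FRAME_MODE_DSA == PORT_CONTROL_DSA_TAG);
	return 0;
}
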
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index eadcb053807e..9a8308553520 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -34,6 +34,7 @@ source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 1367afcd0a8b..4395d99115a0 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_NET_BFIN) += adi/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 426916036151..acd53173fcc0 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,10 +179,8 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
+ depends on ((OF_NET && OF_ADDRESS) || ACPI) && HAS_IOMEM && HAS_DMA
depends on ARM64 || COMPILE_TEST
- select PHYLIB
- select AMD_XGBE_PHY
select BITREVERSE
select CRC32
select PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 34c28aac767f..b6fa89102526 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -857,6 +857,48 @@
*/
#define PCS_MMD_SELECT 0xff
+/* SerDes integration register offsets */
+#define SIR0_KR_RT_1 0x002c
+#define SIR0_STATUS 0x0040
+#define SIR1_SPEED 0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX 11
+#define SIR0_KR_RT_1_RESET_WIDTH 1
+#define SIR0_STATUS_RX_READY_INDEX 0
+#define SIR0_STATUS_RX_READY_WIDTH 1
+#define SIR0_STATUS_TX_READY_INDEX 8
+#define SIR0_STATUS_TX_READY_WIDTH 1
+#define SIR1_SPEED_CDR_RATE_INDEX 12
+#define SIR1_SPEED_CDR_RATE_WIDTH 4
+#define SIR1_SPEED_DATARATE_INDEX 4
+#define SIR1_SPEED_DATARATE_WIDTH 2
+#define SIR1_SPEED_PLLSEL_INDEX 3
+#define SIR1_SPEED_PLLSEL_WIDTH 1
+#define SIR1_SPEED_RATECHANGE_INDEX 6
+#define SIR1_SPEED_RATECHANGE_WIDTH 1
+#define SIR1_SPEED_TXAMP_INDEX 8
+#define SIR1_SPEED_TXAMP_WIDTH 4
+#define SIR1_SPEED_WORDMODE_INDEX 0
+#define SIR1_SPEED_WORDMODE_WIDTH 3
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
+#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
+#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+
/* Descriptor/Packet entry bit positions and sizes */
#define RX_PACKET_ERRORS_CRC_INDEX 2
#define RX_PACKET_ERRORS_CRC_WIDTH 1
@@ -973,10 +1015,47 @@
#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+#endif
+
+#ifndef MDIO_PMA_10GBR_FECCTRL
+#define MDIO_PMA_10GBR_FECCTRL 0x00ab
+#endif
+
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP 0x0016
+#endif
+
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX 0x0019
+#endif
+
#ifndef MDIO_AN_COMP_STAT
#define MDIO_AN_COMP_STAT 0x0030
#endif
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK 0x8001
+#endif
+
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT 0x8002
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+/* MDIO mask values */
+#define XGBE_XNP_MCF_NULL_MESSAGE 0x001
+#define XGBE_XNP_ACK_PROCESSED BIT(12)
+#define XGBE_XNP_MP_FORMATTED BIT(13)
+#define XGBE_XNP_NP_EXCHANGE BIT(15)
+
+#define XGBE_KR_TRAINING_START BIT(0)
+#define XGBE_KR_TRAINING_ENABLE BIT(1)
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
@@ -1119,6 +1198,82 @@ do { \
ioread32((_pdata)->xpcs_regs + (_off))
/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+ */
+#define XSIR_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XSIR0_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR0_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+#define XSIR1_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR1_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes RxTx registers.
+ */
+#define XRXTX_IOREAD(_pdata, _reg) \
+ ioread16((_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_pdata, _reg, _val) \
+ iowrite16((_val), (_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XRXTX_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
* using MDIO. Different from above because of the use of standardized
* Linux include values. No shifting is performed with the bit
* operations, everything works on mask values.
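The X*_IOWRITE_BITS macros above rely on token pasting: the register and field names are glued together to locate the matching _INDEX/_WIDTH constants, and the store is a read-modify-write of the whole 16-bit register. A hypothetical userspace reduction; the DEMO_* names and the plain variable standing in for the ioremapped register are inventions for the demo:

#include <stdio.h>
#include <stdint.h>

#define SIR1_SPEED_CDR_RATE_INDEX 12
#define SIR1_SPEED_CDR_RATE_WIDTH 4

static uint16_t mmio_SIR1_SPEED;        /* stand-in for the MMIO register */

#define DEMO_IOREAD(_reg)        (mmio_##_reg)
#define DEMO_IOWRITE(_reg, _val) (mmio_##_reg = (_val))

#define DEMO_IOWRITE_BITS(_reg, _field, _val)                           \
do {                                                                    \
        uint16_t reg_val = DEMO_IOREAD(_reg);                           \
        uint16_t mask = (1u << _reg##_##_field##_WIDTH) - 1;            \
        reg_val &= ~(mask << _reg##_##_field##_INDEX);                  \
        reg_val |= ((_val) & mask) << _reg##_##_field##_INDEX;          \
        DEMO_IOWRITE(_reg, reg_val);                                    \
} while (0)

int main(void)
{
        DEMO_IOWRITE_BITS(SIR1_SPEED, CDR_RATE, 0x7);
        printf("SIR1_SPEED mirror = %#06x\n", mmio_SIR1_SPEED);
        return 0;
}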
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 8a50b01c2686..a6b9899e285f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -150,9 +150,12 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
tc_ets = 0;
tc_ets_weight = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
- ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
- DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
+ netif_dbg(pdata, drv, netdev,
+ "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+ ets->tc_tx_bw[i], ets->tc_rx_bw[i],
+ ets->tc_tsa[i]);
+ netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
+ ets->prio_tc[i]);
if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
(i >= pdata->hw_feat.tc_cnt))
@@ -214,8 +217,9 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
- pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+ netif_dbg(pdata, drv, netdev,
+ "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+ pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
if (!pdata->pfc) {
pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
@@ -238,9 +242,10 @@ static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
u8 support = xgbe_dcb_getdcbx(netdev);
- DBGPR(" DCBX=%#hhx\n", dcbx);
+ netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx);
if (dcbx & ~support)
return 1;
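The DBGPR() calls replaced in this file (and throughout the rest of the patch) become netif_dbg() and friends, which are gated per device by a msg_enable bitmask rather than by a compile-time define. A rough userspace analog of that gating; the bit values and demo_* names are invented, not the kernel's:

#include <stdio.h>

#define MSG_DRV  (1u << 0)
#define MSG_INTR (1u << 1)

struct demo_dev {
        unsigned int msg_enable;
};

#define demo_dbg(dev, class, fmt, ...)                                  \
do {                                                                    \
        if ((dev)->msg_enable & (class))                                \
                printf(fmt, ##__VA_ARGS__);                             \
} while (0)

int main(void)
{
        struct demo_dev dev = { .msg_enable = MSG_DRV };

        demo_dbg(&dev, MSG_DRV, "TC%u using SP\n", 0u);   /* prints */
        demo_dbg(&dev, MSG_INTR, "DMA_ISR=%#x\n", 0u);    /* suppressed */
        return 0;
}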
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index d81fc6bd4759..dd03ad865caf 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -208,8 +208,9 @@ static int xgbe_init_ring(struct xgbe_prv_data *pdata,
if (!ring->rdata)
return -ENOMEM;
- DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
- ring->rdesc, ring->rdesc_dma, ring->rdata);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
+ ring->rdesc, &ring->rdesc_dma, ring->rdata);
DBGPR("<--xgbe_init_ring\n");
@@ -226,7 +227,9 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
- DBGPR(" %s - tx_ring:\n", channel->name);
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+ channel->name);
+
ret = xgbe_init_ring(pdata, channel->tx_ring,
pdata->tx_desc_count);
if (ret) {
@@ -235,12 +238,14 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
goto err_ring;
}
- DBGPR(" %s - rx_ring:\n", channel->name);
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
+ channel->name);
+
ret = xgbe_init_ring(pdata, channel->rx_ring,
pdata->rx_desc_count);
if (ret) {
netdev_alert(pdata->netdev,
- "error initializing Tx ring\n");
+ "error initializing Rx ring\n");
goto err_ring;
}
}
@@ -476,8 +481,6 @@ static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
if (rdata->state_saved) {
rdata->state_saved = 0;
- rdata->state.incomplete = 0;
- rdata->state.context_next = 0;
rdata->state.skb = NULL;
rdata->state.len = 0;
rdata->state.error = 0;
@@ -518,8 +521,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
rdata = XGBE_GET_DESC_DATA(ring, cur_index);
if (tso) {
- DBGPR(" TSO packet\n");
-
/* Map the TSO header */
skb_dma = dma_map_single(pdata->dev, skb->data,
packet->header_len, DMA_TO_DEVICE);
@@ -529,6 +530,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = packet->header_len;
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb header: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, packet->header_len);
offset = packet->header_len;
@@ -550,8 +554,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = len;
- DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
- cur_index, skb_dma, len);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb data: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
datalen -= len;
offset += len;
@@ -563,7 +568,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- DBGPR(" mapping frag %u\n", i);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "mapping frag %u\n", i);
frag = &skb_shinfo(skb)->frags[i];
offset = 0;
@@ -582,8 +588,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
rdata->skb_dma = skb_dma;
rdata->skb_dma_len = len;
rdata->mapped_as_page = 1;
- DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
- cur_index, skb_dma, len);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "skb frag: index=%u, dma=%pad, len=%u\n",
+ cur_index, &skb_dma, len);
datalen -= len;
offset += len;
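The dma=0x%llx formats above become %pad, which takes a pointer to the dma_addr_t (hence the new &skb_dma) so one format string is correct whether dma_addr_t is 32 or 64 bits wide. A userspace analog of the pass-by-pointer idea, with a local dma_addr_t typedef as the stand-in:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;            /* could equally be uint32_t */

static void print_dma(const char *what, const dma_addr_t *dma)
{
        /* the callee widens explicitly; callers never name the width */
        printf("%s: dma=%#llx\n", what, (unsigned long long)*dma);
}

int main(void)
{
        dma_addr_t skb_dma = 0x12345678;

        print_dma("skb data", &skb_dma);
        return 0;
}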
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 21d9497518fd..506e832c9e9a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
return 0;
- DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
+ netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+ enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
return 0;
@@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
return 0;
- DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
+ netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+ enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
return 0;
@@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
mac_addr[0] = ha->addr[4];
mac_addr[1] = ha->addr[5];
- DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr,
- *mac_reg);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "adding mac address %pM at %#x\n",
+ ha->addr, *mac_reg);
XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
}
@@ -907,23 +910,6 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
- /* If the PCS is changing modes, match the MAC speed to it */
- if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
- ((mmd_address & 0xffff) == MDIO_CTRL2)) {
- struct phy_device *phydev = pdata->phydev;
-
- if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
- /* KX mode */
- if (phydev->supported & SUPPORTED_1000baseKX_Full)
- xgbe_set_gmii_speed(pdata);
- else
- xgbe_set_gmii_2500_speed(pdata);
- } else {
- /* KR mode */
- xgbe_set_xgmii_speed(pdata);
- }
- }
-
/* The PCS registers are accessed using mmio. The underlying APB3
* management interface uses indirect addressing to access the MMD
* register sets. This requires accessing of the PCS register in two
@@ -1322,7 +1308,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
- DBGPR(" TC%u using SP\n", i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TC%u using SP\n", i);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
MTL_TSA_SP);
break;
@@ -1330,7 +1317,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
weight = total_weight * ets->tc_tx_bw[i] / 100;
weight = clamp(weight, min_weight, total_weight);
- DBGPR(" TC%u using DWRR (weight %u)\n", i, weight);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TC%u using DWRR (weight %u)\n", i, weight);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
MTL_TSA_ETS);
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
@@ -1359,7 +1347,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
}
mask &= 0xff;
- DBGPR(" TC%u PFC mask=%#x\n", tc, mask);
+ netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
+ tc, mask);
reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -1457,8 +1446,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
/* Create a context descriptor if this is a TSO packet */
if (tso_context || vlan_context) {
if (tso_context) {
- DBGPR(" TSO context descriptor, mss=%u\n",
- packet->mss);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "TSO context descriptor, mss=%u\n",
+ packet->mss);
/* Set the MSS size */
XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
@@ -1476,8 +1466,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
}
if (vlan_context) {
- DBGPR(" VLAN context descriptor, ctag=%u\n",
- packet->vlan_ctag);
+ netif_dbg(pdata, tx_queued, pdata->netdev,
+ "VLAN context descriptor, ctag=%u\n",
+ packet->vlan_ctag);
/* Mark it as a CONTEXT descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
@@ -1533,6 +1524,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
packet->tcp_payload_len);
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
packet->tcp_header_len / 4);
+
+ pdata->ext_stats.tx_tso_packets++;
} else {
/* Enable CRC and Pad Insertion */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
@@ -1594,9 +1587,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
rdesc = rdata->rdesc;
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
- xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
-#endif
+ if (netif_msg_tx_queued(pdata))
+ xgbe_dump_tx_desc(pdata, ring, start_index,
+ packet->rdesc_count, 1);
/* Make sure ownership is written to the descriptor */
dma_wmb();
@@ -1618,11 +1611,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
static int xgbe_dev_read(struct xgbe_channel *channel)
{
+ struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
- struct net_device *netdev = channel->pdata->netdev;
+ struct net_device *netdev = pdata->netdev;
unsigned int err, etlt, l34t;
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1637,9 +1631,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
/* Make sure descriptor fields are read after reading the OWN bit */
dma_rmb();
-#ifdef XGMAC_ENABLE_RX_DESC_DUMP
- xgbe_dump_rx_desc(ring, rdesc, ring->cur);
-#endif
+ if (netif_msg_rx_status(pdata))
+ xgbe_dump_rx_desc(pdata, ring, ring->cur);
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
/* Timestamp Context Descriptor */
@@ -1661,9 +1654,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
CONTEXT_NEXT, 1);
/* Get the header length */
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
RX_NORMAL_DESC2, HL);
+ if (rdata->rx.hdr_len)
+ pdata->ext_stats.rx_split_header_packets++;
+ }
/* Get the RSS hash */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
@@ -1700,14 +1696,14 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
INCOMPLETE, 0);
/* Set checksum done indicator as appropriate */
- if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+ if (netdev->features & NETIF_F_RXCSUM)
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CSUM_DONE, 1);
/* Check for errors (only valid in last descriptor) */
err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
- DBGPR(" err=%u, etlt=%#x\n", err, etlt);
+ netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
if (!err || !etlt) {
/* No error if err is 0 or etlt is 0 */
@@ -1718,7 +1714,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
RX_NORMAL_DESC0,
OVT);
- DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
+ netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+ packet->vlan_ctag);
}
} else {
if ((etlt == 0x05) || (etlt == 0x06))
@@ -2026,9 +2023,9 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
- netdev_notice(pdata->netdev,
- "%d Tx hardware queues, %d byte fifo per queue\n",
- pdata->tx_q_count, ((fifo_size + 1) * 256));
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Tx hardware queues, %d byte fifo per queue\n",
+ pdata->tx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -2042,9 +2039,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
- netdev_notice(pdata->netdev,
- "%d Rx hardware queues, %d byte fifo per queue\n",
- pdata->rx_q_count, ((fifo_size + 1) * 256));
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Rx hardware queues, %d byte fifo per queue\n",
+ pdata->rx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2063,14 +2060,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
for (j = 0; j < qptc; j++) {
- DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
}
if (i < qptc_extra) {
- DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
@@ -2088,13 +2087,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
for (i = 0, prio = 0; i < prio_queues;) {
mask = 0;
for (j = 0; j < ppq; j++) {
- DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
if (i < ppq_extra) {
- DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
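The mapping being logged distributes Tx queues across traffic classes: every TC receives q_count / tc_cnt queues, and the first q_count % tc_cnt classes absorb one extra. A standalone sketch of that arithmetic; the qptc/qptc_extra computation sits outside the hunk, so it is reconstructed here with invented counts:

#include <stdio.h>

int main(void)
{
        unsigned int tc_cnt = 3, q_count = 8;
        unsigned int qptc = q_count / tc_cnt;
        unsigned int qptc_extra = q_count % tc_cnt;
        unsigned int i, j, queue = 0;

        for (i = 0; i < tc_cnt; i++) {
                for (j = 0; j < qptc; j++)
                        printf("TXq%u mapped to TC%u\n", queue++, i);
                if (i < qptc_extra)
                        printf("TXq%u mapped to TC%u\n", queue++, i);
        }
        return 0;
}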
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 9fd6c69a8bac..1e9c28d19ef8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -183,9 +183,10 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
channel->rx_ring = rx_ring++;
}
- DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
- channel->name, channel->queue_index, channel->dma_regs,
- channel->dma_irq, channel->tx_ring, channel->rx_ring);
+ netif_dbg(pdata, drv, pdata->netdev,
+ "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+ channel->name, channel->dma_regs, channel->dma_irq,
+ channel->tx_ring, channel->rx_ring);
}
pdata->channel = channel_mem;
@@ -235,7 +236,8 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
struct xgbe_prv_data *pdata = channel->pdata;
if (count > xgbe_tx_avail_desc(ring)) {
- DBGPR(" Tx queue stopped, not enough descriptors available\n");
+ netif_info(pdata, drv, pdata->netdev,
+ "Tx queue stopped, not enough descriptors available\n");
netif_stop_subqueue(pdata->netdev, channel->queue_index);
ring->tx.queue_stopped = 1;
@@ -330,7 +332,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (!dma_isr)
goto isr_done;
- DBGPR(" DMA_ISR = %08x\n", dma_isr);
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
for (i = 0; i < pdata->channel_count; i++) {
if (!(dma_isr & (1 << i)))
@@ -339,7 +341,8 @@ static irqreturn_t xgbe_isr(int irq, void *data)
channel = pdata->channel + i;
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
- DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
+ i, dma_ch_isr);
/* The TI or RI interrupt bits may still be set even if using
* per channel DMA interrupts. Check to be sure those are not
@@ -386,8 +389,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
}
}
- DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
-
isr_done:
return IRQ_HANDLED;
}
@@ -436,43 +437,61 @@ static void xgbe_tx_timer(unsigned long data)
DBGPR("<--xgbe_tx_timer\n");
}
-static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+static void xgbe_service(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ service_work);
+
+ pdata->phy_if.phy_status(pdata);
+}
+
+static void xgbe_service_timer(unsigned long data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ schedule_work(&pdata->service_work);
+
+ mod_timer(&pdata->service_timer, jiffies + HZ);
+}
+
+static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
unsigned int i;
- DBGPR("-->xgbe_init_tx_timers\n");
+ setup_timer(&pdata->service_timer, xgbe_service_timer,
+ (unsigned long)pdata);
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->tx_ring)
break;
- DBGPR(" %s adding tx timer\n", channel->name);
setup_timer(&channel->tx_timer, xgbe_tx_timer,
(unsigned long)channel);
}
+}
- DBGPR("<--xgbe_init_tx_timers\n");
+static void xgbe_start_timers(struct xgbe_prv_data *pdata)
+{
+ mod_timer(&pdata->service_timer, jiffies + HZ);
}
-static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
+static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
unsigned int i;
- DBGPR("-->xgbe_stop_tx_timers\n");
+ del_timer_sync(&pdata->service_timer);
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->tx_ring)
break;
- DBGPR(" %s deleting tx timer\n", channel->name);
del_timer_sync(&channel->tx_timer);
}
-
- DBGPR("<--xgbe_stop_tx_timers\n");
}
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
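The per-ring Tx timers are now joined by a one-second service timer whose handler only queues work and re-arms itself; the heavy lifting (polling PHY status through phy_if.phy_status) happens in process context. A loose userspace analog of that split, with sleep() standing in for mod_timer(..., jiffies + HZ) and a direct call standing in for schedule_work():

#include <stdio.h>
#include <unistd.h>

static void xgbe_service_demo(void)
{
        /* in the driver: pdata->phy_if.phy_status(pdata) */
        printf("service work: poll PHY/link status\n");
}

int main(void)
{
        int tick;

        for (tick = 0; tick < 3; tick++) {
                sleep(1);               /* one-second timer period */
                xgbe_service_demo();    /* deferred to process context */
        }
        return 0;
}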
@@ -512,6 +531,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
RXFIFOSIZE);
hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
TXFIFOSIZE);
+ hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
@@ -759,112 +779,12 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_free_rx_data\n");
}
-static void xgbe_adjust_link(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct phy_device *phydev = pdata->phydev;
- int new_state = 0;
-
- if (!phydev)
- return;
-
- if (phydev->link) {
- /* Flow control support */
- if (pdata->pause_autoneg) {
- if (phydev->pause || phydev->asym_pause) {
- pdata->tx_pause = 1;
- pdata->rx_pause = 1;
- } else {
- pdata->tx_pause = 0;
- pdata->rx_pause = 0;
- }
- }
-
- if (pdata->tx_pause != pdata->phy_tx_pause) {
- hw_if->config_tx_flow_control(pdata);
- pdata->phy_tx_pause = pdata->tx_pause;
- }
-
- if (pdata->rx_pause != pdata->phy_rx_pause) {
- hw_if->config_rx_flow_control(pdata);
- pdata->phy_rx_pause = pdata->rx_pause;
- }
-
- /* Speed support */
- if (phydev->speed != pdata->phy_speed) {
- new_state = 1;
-
- switch (phydev->speed) {
- case SPEED_10000:
- hw_if->set_xgmii_speed(pdata);
- break;
-
- case SPEED_2500:
- hw_if->set_gmii_2500_speed(pdata);
- break;
-
- case SPEED_1000:
- hw_if->set_gmii_speed(pdata);
- break;
- }
- pdata->phy_speed = phydev->speed;
- }
-
- if (phydev->link != pdata->phy_link) {
- new_state = 1;
- pdata->phy_link = 1;
- }
- } else if (pdata->phy_link) {
- new_state = 1;
- pdata->phy_link = 0;
- pdata->phy_speed = SPEED_UNKNOWN;
- }
-
- if (new_state)
- phy_print_status(phydev);
-}
-
static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
- struct net_device *netdev = pdata->netdev;
- struct phy_device *phydev = pdata->phydev;
- int ret;
-
pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
- pdata->phy_tx_pause = pdata->tx_pause;
- pdata->phy_rx_pause = pdata->rx_pause;
- ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
- pdata->phy_mode);
- if (ret) {
- netdev_err(netdev, "phy_connect_direct failed\n");
- return ret;
- }
-
- if (!phydev->drv || (phydev->drv->phy_id == 0)) {
- netdev_err(netdev, "phy_id not valid\n");
- ret = -ENODEV;
- goto err_phy_connect;
- }
- DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
- dev_name(&phydev->dev), phydev->link);
-
- return 0;
-
-err_phy_connect:
- phy_disconnect(phydev);
-
- return ret;
-}
-
-static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
-{
- if (!pdata->phydev)
- return;
-
- phy_disconnect(pdata->phydev);
+ return pdata->phy_if.phy_reset(pdata);
}
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
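With xgbe_adjust_link() and the phylib connection gone, PHY handling moves behind a driver-private table of function pointers (phy_if), leaving xgbe_phy_init() as little more than a reset call. A minimal sketch of that interface style; the member names mirror the calls visible in this patch, but the struct layout below is an assumption:

#include <stdio.h>

struct demo_phy_if {
        int  (*phy_reset)(void *pdata);
        int  (*phy_start)(void *pdata);
        void (*phy_stop)(void *pdata);
        void (*phy_status)(void *pdata);
};

static int demo_reset(void *pdata)   { (void)pdata; printf("phy reset\n"); return 0; }
static int demo_start(void *pdata)   { (void)pdata; printf("phy start\n"); return 0; }
static void demo_stop(void *pdata)   { (void)pdata; printf("phy stop\n"); }
static void demo_status(void *pdata) { (void)pdata; printf("phy status poll\n"); }

int main(void)
{
        struct demo_phy_if phy_if = {
                .phy_reset  = demo_reset,
                .phy_start  = demo_start,
                .phy_stop   = demo_stop,
                .phy_status = demo_status,
        };

        if (!phy_if.phy_reset(NULL) && !phy_if.phy_start(NULL)) {
                phy_if.phy_status(NULL);    /* driven by the service timer */
                phy_if.phy_stop(NULL);
        }
        return 0;
}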
@@ -889,13 +809,14 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
netif_tx_stop_all_queues(netdev);
+ xgbe_stop_timers(pdata);
+ flush_workqueue(pdata->dev_workqueue);
+
hw_if->powerdown_tx(pdata);
hw_if->powerdown_rx(pdata);
xgbe_napi_disable(pdata, 0);
- phy_stop(pdata->phydev);
-
pdata->power_down = 1;
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -924,8 +845,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
pdata->power_down = 0;
- phy_start(pdata->phydev);
-
xgbe_napi_enable(pdata, 0);
hw_if->powerup_tx(pdata);
@@ -936,6 +855,8 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
netif_tx_start_all_queues(netdev);
+ xgbe_start_timers(pdata);
+
spin_unlock_irqrestore(&pdata->lock, flags);
DBGPR("<--xgbe_powerup\n");
@@ -946,6 +867,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
static int xgbe_start(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_phy_if *phy_if = &pdata->phy_if;
struct net_device *netdev = pdata->netdev;
int ret;
@@ -953,7 +875,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
hw_if->init(pdata);
- phy_start(pdata->phydev);
+ ret = phy_if->phy_start(pdata);
+ if (ret)
+ goto err_phy;
xgbe_napi_enable(pdata, 1);
@@ -964,10 +888,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
- xgbe_init_tx_timers(pdata);
-
netif_tx_start_all_queues(netdev);
+ xgbe_start_timers(pdata);
+ schedule_work(&pdata->service_work);
+
DBGPR("<--xgbe_start\n");
return 0;
@@ -975,8 +900,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
err_napi:
xgbe_napi_disable(pdata, 1);
- phy_stop(pdata->phydev);
+ phy_if->phy_stop(pdata);
+err_phy:
hw_if->exit(pdata);
return ret;
@@ -985,6 +911,7 @@ err_napi:
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_phy_if *phy_if = &pdata->phy_if;
struct xgbe_channel *channel;
struct net_device *netdev = pdata->netdev;
struct netdev_queue *txq;
@@ -994,7 +921,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
netif_tx_stop_all_queues(netdev);
- xgbe_stop_tx_timers(pdata);
+ xgbe_stop_timers(pdata);
+ flush_workqueue(pdata->dev_workqueue);
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
@@ -1003,7 +931,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
xgbe_napi_disable(pdata, 1);
- phy_stop(pdata->phydev);
+ phy_if->phy_stop(pdata);
hw_if->exit(pdata);
@@ -1374,7 +1302,7 @@ static int xgbe_open(struct net_device *netdev)
ret = clk_prepare_enable(pdata->sysclk);
if (ret) {
netdev_alert(netdev, "dma clk_prepare_enable failed\n");
- goto err_phy_init;
+ return ret;
}
ret = clk_prepare_enable(pdata->ptpclk);
@@ -1399,14 +1327,17 @@ static int xgbe_open(struct net_device *netdev)
if (ret)
goto err_channels;
- /* Initialize the device restart and Tx timestamp work struct */
+ INIT_WORK(&pdata->service_work, xgbe_service);
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ xgbe_init_timers(pdata);
ret = xgbe_start(pdata);
if (ret)
goto err_rings;
+ clear_bit(XGBE_DOWN, &pdata->dev_state);
+
DBGPR("<--xgbe_open\n");
return 0;
@@ -1423,9 +1354,6 @@ err_ptpclk:
err_sysclk:
clk_disable_unprepare(pdata->sysclk);
-err_phy_init:
- xgbe_phy_exit(pdata);
-
return ret;
}
@@ -1449,8 +1377,7 @@ static int xgbe_close(struct net_device *netdev)
clk_disable_unprepare(pdata->ptpclk);
clk_disable_unprepare(pdata->sysclk);
- /* Release the phy */
- xgbe_phy_exit(pdata);
+ set_bit(XGBE_DOWN, &pdata->dev_state);
DBGPR("<--xgbe_close\n");
@@ -1478,7 +1405,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_OK;
if (skb->len == 0) {
- netdev_err(netdev, "empty skb received from stack\n");
+ netif_err(pdata, tx_err, netdev,
+ "empty skb received from stack\n");
dev_kfree_skb_any(skb);
goto tx_netdev_return;
}
@@ -1494,7 +1422,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = xgbe_prep_tso(skb, packet);
if (ret) {
- netdev_err(netdev, "error processing TSO packet\n");
+ netif_err(pdata, tx_err, netdev,
+ "error processing TSO packet\n");
dev_kfree_skb_any(skb);
goto tx_netdev_return;
}
@@ -1513,9 +1442,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Configure required descriptor fields for transmission */
hw_if->dev_xmit(channel);
-#ifdef XGMAC_ENABLE_TX_PKT_DUMP
- xgbe_print_pkt(netdev, skb, true);
-#endif
+ if (netif_msg_pktdata(pdata))
+ xgbe_print_pkt(netdev, skb, true);
/* Stop the queue in advance if there may not be enough descriptors */
xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
@@ -1710,7 +1638,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
(pdata->q2tc_map[queue] == i))
queue++;
- DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1);
+ netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
+ i, offset, queue - 1);
netdev_set_tc_queue(netdev, i, queue - offset, offset);
offset = queue;
}
@@ -1820,9 +1749,10 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
lower_32_bits(rdata->rdesc_dma));
}
-static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+ struct napi_struct *napi,
struct xgbe_ring_data *rdata,
- unsigned int *len)
+ unsigned int len)
{
struct sk_buff *skb;
u8 *packet;
@@ -1832,14 +1762,31 @@ static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
if (!skb)
return NULL;
+ /* Start with the header buffer which may contain just the header
+ * or the header plus data
+ */
+ dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
+ rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+
packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset;
- copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+ copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
copy_len = min(rdata->rx.hdr.dma_len, copy_len);
skb_copy_to_linear_data(skb, packet, copy_len);
skb_put(skb, copy_len);
- *len -= copy_len;
+ len -= copy_len;
+ if (len) {
+ /* Add the remaining data as a frag */
+ dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
+ rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rdata->rx.buf.pa.pages,
+ rdata->rx.buf.pa.pages_offset,
+ len, rdata->rx.buf.dma_len);
+ rdata->rx.buf.pa.pages = NULL;
+ }
return skb;
}
@@ -1877,9 +1824,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
* bit */
dma_rmb();
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
- xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
-#endif
+ if (netif_msg_tx_done(pdata))
+ xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
if (hw_if->is_last_desc(rdesc)) {
tx_packets += rdata->tx.packets;
@@ -1922,7 +1868,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps;
unsigned int incomplete, error, context_next, context;
- unsigned int len, put_len, max_len;
+ unsigned int len, rdesc_len, max_len;
unsigned int received = 0;
int packet_count = 0;
@@ -1932,6 +1878,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
if (!ring)
return 0;
+ incomplete = 0;
+ context_next = 0;
+
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
@@ -1941,15 +1890,11 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
/* First time in loop see if we need to restore state */
if (!received && rdata->state_saved) {
- incomplete = rdata->state.incomplete;
- context_next = rdata->state.context_next;
skb = rdata->state.skb;
error = rdata->state.error;
len = rdata->state.len;
} else {
memset(packet, 0, sizeof(*packet));
- incomplete = 0;
- context_next = 0;
skb = NULL;
error = 0;
len = 0;
@@ -1983,29 +1928,23 @@ read_again:
if (error || packet->errors) {
if (packet->errors)
- DBGPR("Error in received packet\n");
+ netif_err(pdata, rx_err, netdev,
+ "error in received packet\n");
dev_kfree_skb(skb);
goto next_packet;
}
if (!context) {
- put_len = rdata->rx.len - len;
- len += put_len;
-
- if (!skb) {
- dma_sync_single_for_cpu(pdata->dev,
- rdata->rx.hdr.dma,
- rdata->rx.hdr.dma_len,
- DMA_FROM_DEVICE);
-
- skb = xgbe_create_skb(napi, rdata, &put_len);
- if (!skb) {
+ /* Length is cumulative, get this descriptor's length */
+ rdesc_len = rdata->rx.len - len;
+ len += rdesc_len;
+
+ if (rdesc_len && !skb) {
+ skb = xgbe_create_skb(pdata, napi, rdata,
+ rdesc_len);
+ if (!skb)
error = 1;
- goto skip_data;
- }
- }
-
- if (put_len) {
+ } else if (rdesc_len) {
dma_sync_single_for_cpu(pdata->dev,
rdata->rx.buf.dma,
rdata->rx.buf.dma_len,
@@ -2014,12 +1953,12 @@ read_again:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
rdata->rx.buf.pa.pages_offset,
- put_len, rdata->rx.buf.dma_len);
+ rdesc_len,
+ rdata->rx.buf.dma_len);
rdata->rx.buf.pa.pages = NULL;
}
}
-skip_data:
if (incomplete || context_next)
goto read_again;
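The renamed rdesc_len makes the semantics explicit: the hardware reports a cumulative packet length across a multi-descriptor packet, so each descriptor's contribution is the difference from the running total. A tiny demonstration with invented per-descriptor totals:

#include <stdio.h>

int main(void)
{
        unsigned int rx_len[] = { 256, 1024, 1500 };    /* cumulative */
        unsigned int len = 0, rdesc_len, i;

        for (i = 0; i < 3; i++) {
                rdesc_len = rx_len[i] - len;    /* this descriptor only */
                len += rdesc_len;
                printf("desc %u carries %u bytes (total %u)\n",
                       i, rdesc_len, len);
        }
        return 0;
}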
@@ -2033,14 +1972,14 @@ skip_data:
max_len += VLAN_HLEN;
if (skb->len > max_len) {
- DBGPR("packet length exceeds configured MTU\n");
+ netif_err(pdata, rx_err, netdev,
+ "packet length exceeds configured MTU\n");
dev_kfree_skb(skb);
goto next_packet;
}
-#ifdef XGMAC_ENABLE_RX_PKT_DUMP
- xgbe_print_pkt(netdev, skb, false);
-#endif
+ if (netif_msg_pktdata(pdata))
+ xgbe_print_pkt(netdev, skb, false);
skb_checksum_none_assert(skb);
if (XGMAC_GET_BITS(packet->attributes,
@@ -2072,7 +2011,6 @@ skip_data:
skb_record_rx_queue(skb, channel->queue_index);
skb_mark_napi_id(skb, napi);
- netdev->last_rx = jiffies;
napi_gro_receive(napi, skb);
next_packet:
@@ -2083,8 +2021,6 @@ next_packet:
if (received && (incomplete || context_next)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdata->state_saved = 1;
- rdata->state.incomplete = incomplete;
- rdata->state.context_next = context_next;
rdata->state.skb = skb;
rdata->state.len = len;
rdata->state.error = error;
@@ -2165,8 +2101,8 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
return processed;
}
-void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
- unsigned int count, unsigned int flag)
+void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
+ unsigned int idx, unsigned int count, unsigned int flag)
{
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
@@ -2174,20 +2110,29 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
while (count--) {
rdata = XGBE_GET_DESC_DATA(ring, idx);
rdesc = rdata->rdesc;
- pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
- (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
- le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
- le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+ netdev_dbg(pdata->netdev,
+ "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0),
+ le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2),
+ le32_to_cpu(rdesc->desc3));
idx++;
}
}
-void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
+void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
unsigned int idx)
{
- pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
- le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
- le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+
+ rdata = XGBE_GET_DESC_DATA(ring, idx);
+ rdesc = rdata->rdesc;
+ netdev_dbg(pdata->netdev,
+ "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
+ idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
@@ -2197,21 +2142,21 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
unsigned char buffer[128];
unsigned int i, j;
- netdev_alert(netdev, "\n************** SKB dump ****************\n");
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
- netdev_alert(netdev, "%s packet of %d bytes\n",
- (tx_rx ? "TX" : "RX"), skb->len);
+ netdev_dbg(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
- netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
- netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
- netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
+ netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
for (i = 0, j = 0; i < skb->len;) {
j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
buf[i++]);
if ((i % 32) == 0) {
- netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
+ netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
j = 0;
} else if ((i % 16) == 0) {
buffer[j++] = ' ';
@@ -2221,7 +2166,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
}
}
if (i % 32)
- netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
+ netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
- netdev_alert(netdev, "\n************** SKB dump ****************\n");
+ netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}
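xgbe_print_pkt() keeps its dump layout while dropping from netdev_alert() to netdev_dbg(): bytes accumulate two hex digits at a time, a separator space lands every 16 bytes, and a line flushes every 32. The same loop, extracted into a runnable standalone program over made-up data:

#include <stdio.h>

int main(void)
{
        unsigned char data[40];
        char buffer[128];
        unsigned int i, j;

        for (i = 0; i < sizeof(data); i++)
                data[i] = (unsigned char)i;

        for (i = 0, j = 0; i < sizeof(data);) {
                j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
                              data[i++]);

                if ((i % 32) == 0) {
                        printf(" %#06x: %s\n", i - 32, buffer);
                        j = 0;
                } else if ((i % 16) == 0) {
                        buffer[j++] = ' ';
                        buffer[j] = '\0';
                }
        }
        if (i % 32)
                printf(" %#06x: %s\n", i - (i % 32), buffer);
        return 0;
}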
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 5f149e8ee20f..59e090e95c0e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -133,6 +133,12 @@ struct xgbe_stats {
offsetof(struct xgbe_prv_data, mmc_stats._var), \
}
+#define XGMAC_EXT_STAT(_string, _var) \
+ { _string, \
+ FIELD_SIZEOF(struct xgbe_ext_stats, _var), \
+ offsetof(struct xgbe_prv_data, ext_stats._var), \
+ }
+
static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
XGMAC_MMC_STAT("tx_packets", txframecount_gb),
@@ -140,6 +146,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+ XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
@@ -171,6 +178,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+ XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
};
#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
@@ -239,9 +247,9 @@ static void xgbe_get_pauseparam(struct net_device *netdev,
DBGPR("-->xgbe_get_pauseparam\n");
- pause->autoneg = pdata->pause_autoneg;
- pause->tx_pause = pdata->tx_pause;
- pause->rx_pause = pdata->rx_pause;
+ pause->autoneg = pdata->phy.pause_autoneg;
+ pause->tx_pause = pdata->phy.tx_pause;
+ pause->rx_pause = pdata->phy.rx_pause;
DBGPR("<--xgbe_get_pauseparam\n");
}
@@ -250,7 +258,6 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct phy_device *phydev = pdata->phydev;
int ret = 0;
DBGPR("-->xgbe_set_pauseparam\n");
@@ -258,21 +265,26 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
pause->autoneg, pause->tx_pause, pause->rx_pause);
- pdata->pause_autoneg = pause->autoneg;
- if (pause->autoneg) {
- phydev->advertising |= ADVERTISED_Pause;
- phydev->advertising |= ADVERTISED_Asym_Pause;
+ if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE))
+ return -EINVAL;
+
+ pdata->phy.pause_autoneg = pause->autoneg;
+ pdata->phy.tx_pause = pause->tx_pause;
+ pdata->phy.rx_pause = pause->rx_pause;
- } else {
- phydev->advertising &= ~ADVERTISED_Pause;
- phydev->advertising &= ~ADVERTISED_Asym_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
- pdata->tx_pause = pause->tx_pause;
- pdata->rx_pause = pause->rx_pause;
+ if (pause->rx_pause) {
+ pdata->phy.advertising |= ADVERTISED_Pause;
+ pdata->phy.advertising |= ADVERTISED_Asym_Pause;
}
+ if (pause->tx_pause)
+ pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+
if (netif_running(netdev))
- ret = phy_start_aneg(phydev);
+ ret = pdata->phy_if.phy_config_aneg(pdata);
DBGPR("<--xgbe_set_pauseparam\n");
@@ -283,36 +295,39 @@ static int xgbe_get_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret;
DBGPR("-->xgbe_get_settings\n");
- if (!pdata->phydev)
- return -ENODEV;
+ cmd->phy_address = pdata->phy.address;
+
+ cmd->supported = pdata->phy.supported;
+ cmd->advertising = pdata->phy.advertising;
+ cmd->lp_advertising = pdata->phy.lp_advertising;
+
+ cmd->autoneg = pdata->phy.autoneg;
+ ethtool_cmd_speed_set(cmd, pdata->phy.speed);
+ cmd->duplex = pdata->phy.duplex;
- ret = phy_ethtool_gset(pdata->phydev, cmd);
+ cmd->port = PORT_NONE;
+ cmd->transceiver = XCVR_INTERNAL;
DBGPR("<--xgbe_get_settings\n");
- return ret;
+ return 0;
}
static int xgbe_set_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct phy_device *phydev = pdata->phydev;
u32 speed;
int ret;
DBGPR("-->xgbe_set_settings\n");
- if (!pdata->phydev)
- return -ENODEV;
-
speed = ethtool_cmd_speed(cmd);
- if (cmd->phy_address != phydev->addr)
+ if (cmd->phy_address != pdata->phy.address)
return -EINVAL;
if ((cmd->autoneg != AUTONEG_ENABLE) &&
@@ -333,23 +348,23 @@ static int xgbe_set_settings(struct net_device *netdev,
return -EINVAL;
}
- cmd->advertising &= phydev->supported;
+ cmd->advertising &= pdata->phy.supported;
if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
return -EINVAL;
ret = 0;
- phydev->autoneg = cmd->autoneg;
- phydev->speed = speed;
- phydev->duplex = cmd->duplex;
- phydev->advertising = cmd->advertising;
+ pdata->phy.autoneg = cmd->autoneg;
+ pdata->phy.speed = speed;
+ pdata->phy.duplex = cmd->duplex;
+ pdata->phy.advertising = cmd->advertising;
if (cmd->autoneg == AUTONEG_ENABLE)
- phydev->advertising |= ADVERTISED_Autoneg;
+ pdata->phy.advertising |= ADVERTISED_Autoneg;
else
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
if (netif_running(netdev))
- ret = phy_start_aneg(phydev);
+ ret = pdata->phy_if.phy_config_aneg(pdata);
DBGPR("<--xgbe_set_settings\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 714905384900..fb7c961da49f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -124,9 +124,11 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
+#include <linux/mdio.h>
#include "xgbe.h"
#include "xgbe-common.h"
@@ -136,6 +138,49 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);
+static int debug = -1;
+module_param(debug, int, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
+static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP);
+
+static const u32 xgbe_serdes_blwc[] = {
+ XGBE_SPEED_1000_BLWC,
+ XGBE_SPEED_2500_BLWC,
+ XGBE_SPEED_10000_BLWC,
+};
+
+static const u32 xgbe_serdes_cdr_rate[] = {
+ XGBE_SPEED_1000_CDR,
+ XGBE_SPEED_2500_CDR,
+ XGBE_SPEED_10000_CDR,
+};
+
+static const u32 xgbe_serdes_pq_skew[] = {
+ XGBE_SPEED_1000_PQ,
+ XGBE_SPEED_2500_PQ,
+ XGBE_SPEED_10000_PQ,
+};
+
+static const u32 xgbe_serdes_tx_amp[] = {
+ XGBE_SPEED_1000_TXAMP,
+ XGBE_SPEED_2500_TXAMP,
+ XGBE_SPEED_10000_TXAMP,
+};
+
+static const u32 xgbe_serdes_dfe_tap_cfg[] = {
+ XGBE_SPEED_1000_DFE_TAP_CONFIG,
+ XGBE_SPEED_2500_DFE_TAP_CONFIG,
+ XGBE_SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 xgbe_serdes_dfe_tap_ena[] = {
+ XGBE_SPEED_1000_DFE_TAP_ENABLE,
+ XGBE_SPEED_2500_DFE_TAP_ENABLE,
+ XGBE_SPEED_10000_DFE_TAP_ENABLE,
+};
+
static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
DBGPR("-->xgbe_default_config\n");
@@ -153,8 +198,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
pdata->rx_pause = 1;
pdata->phy_speed = SPEED_UNKNOWN;
pdata->power_down = 0;
- pdata->default_autoneg = AUTONEG_ENABLE;
- pdata->default_speed = SPEED_10000;
DBGPR("<--xgbe_default_config\n");
}
@@ -162,6 +205,7 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
xgbe_init_function_ptrs_dev(&pdata->hw_if);
+ xgbe_init_function_ptrs_phy(&pdata->phy_if);
xgbe_init_function_ptrs_desc(&pdata->desc_if);
}
@@ -248,23 +292,82 @@ static int xgbe_of_support(struct xgbe_prv_data *pdata)
return 0;
}
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ struct device_node *phy_node;
+ struct platform_device *phy_pdev;
+
+ phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (phy_node) {
+ /* Old style device tree:
+ * The XGBE and PHY resources are separate
+ */
+ phy_pdev = of_find_device_by_node(phy_node);
+ of_node_put(phy_node);
+ } else {
+ /* New style device tree:
+ * The XGBE and PHY resources are grouped together with
+ * the PHY resources listed last
+ */
+ get_device(dev);
+ phy_pdev = pdata->pdev;
+ }
+
+ return phy_pdev;
+}
#else /* CONFIG_OF */
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
}
-#endif /*CONFIG_OF */
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static unsigned int xgbe_resource_count(struct platform_device *pdev,
+ unsigned int type)
+{
+ unsigned int count;
+ int i;
+
+ for (i = 0, count = 0; i < pdev->num_resources; i++) {
+ struct resource *res = &pdev->resource[i];
+
+ if (type == resource_type(res))
+ count++;
+ }
+
+ return count;
+}
+
+static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+ struct platform_device *phy_pdev;
+
+ if (pdata->use_acpi) {
+ get_device(pdata->dev);
+ phy_pdev = pdata->pdev;
+ } else {
+ phy_pdev = xgbe_of_get_phy_pdev(pdata);
+ }
+
+ return phy_pdev;
+}
static int xgbe_probe(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata;
- struct xgbe_hw_if *hw_if;
- struct xgbe_desc_if *desc_if;
struct net_device *netdev;
- struct device *dev = &pdev->dev;
+ struct device *dev = &pdev->dev, *phy_dev;
+ struct platform_device *phy_pdev;
struct resource *res;
const char *phy_mode;
- unsigned int i;
+ unsigned int i, phy_memnum, phy_irqnum;
int ret;
DBGPR("--> xgbe_probe\n");
@@ -289,9 +392,36 @@ static int xgbe_probe(struct platform_device *pdev)
mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);
+ pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ set_bit(XGBE_DOWN, &pdata->dev_state);
+
/* Check if we should use ACPI or DT */
pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+ phy_pdev = xgbe_get_phy_pdev(pdata);
+ if (!phy_pdev) {
+ dev_err(dev, "unable to obtain phy device\n");
+ ret = -EINVAL;
+ goto err_phydev;
+ }
+ phy_dev = &phy_pdev->dev;
+
+ if (pdev == phy_pdev) {
+ /* New style device tree or ACPI:
+ * The XGBE and PHY resources are grouped together with
+ * the PHY resources listed last
+ */
+ phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
+ phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+ } else {
+ /* Old style device tree:
+ * The XGBE and PHY resources are separate
+ */
+ phy_memnum = 0;
+ phy_irqnum = 0;
+ }
+
/* Set and validate the number of descriptors for a ring */
BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
pdata->tx_desc_count = XGBE_TX_DESC_CNT;
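The resource bookkeeping above works because, when the MAC and PHY share one platform device (new device tree style or ACPI), the PHY's three SerDes memory regions and single auto-negotiation interrupt are the last entries in the resource list, so the starting indices fall out of a count. A toy version with invented totals:

#include <stdio.h>

int main(void)
{
        unsigned int mem_count = 5, irq_count = 2;   /* total resources */
        unsigned int phy_memnum = mem_count - 3;     /* rxtx, sir0, sir1 */
        unsigned int phy_irqnum = irq_count - 1;     /* AN interrupt */

        printf("PHY memory resources start at index %u\n", phy_memnum);
        printf("PHY irq resources start at index %u\n", phy_irqnum);
        return 0;
}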
@@ -318,7 +448,8 @@ static int xgbe_probe(struct platform_device *pdev)
ret = PTR_ERR(pdata->xgmac_regs);
goto err_io;
}
- DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
pdata->xpcs_regs = devm_ioremap_resource(dev, res);
@@ -327,7 +458,38 @@ static int xgbe_probe(struct platform_device *pdev)
ret = PTR_ERR(pdata->xpcs_regs);
goto err_io;
}
- DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->rxtx_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->rxtx_regs)) {
+ dev_err(dev, "rxtx ioremap failed\n");
+ ret = PTR_ERR(pdata->rxtx_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->sir0_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->sir0_regs)) {
+ dev_err(dev, "sir0 ioremap failed\n");
+ ret = PTR_ERR(pdata->sir0_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
+
+ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+ pdata->sir1_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->sir1_regs)) {
+ dev_err(dev, "sir1 ioremap failed\n");
+ ret = PTR_ERR(pdata->sir1_regs);
+ goto err_io;
+ }
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
/* Retrieve the MAC address */
ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
@@ -355,6 +517,115 @@ static int xgbe_probe(struct platform_device *pdev)
if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
pdata->per_channel_irq = 1;
+ /* Retrieve the PHY speedset */
+ ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
+ &pdata->speed_set);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+ goto err_io;
+ }
+
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ case XGBE_SPEEDSET_2500_10000:
+ break;
+ default:
+ dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Retrieve the PHY configuration properties */
+ if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_BLWC_PROPERTY,
+ pdata->serdes_blwc,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_BLWC_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
+ sizeof(pdata->serdes_blwc));
+ }
+
+ if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_CDR_RATE_PROPERTY,
+ pdata->serdes_cdr_rate,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_CDR_RATE_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
+ sizeof(pdata->serdes_cdr_rate));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PQ_SKEW_PROPERTY,
+ pdata->serdes_pq_skew,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PQ_SKEW_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
+ sizeof(pdata->serdes_pq_skew));
+ }
+
+ if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_TX_AMP_PROPERTY,
+ pdata->serdes_tx_amp,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_TX_AMP_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
+ sizeof(pdata->serdes_tx_amp));
+ }
+
+ if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_DFE_CFG_PROPERTY,
+ pdata->serdes_dfe_tap_cfg,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_DFE_CFG_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
+ sizeof(pdata->serdes_dfe_tap_cfg));
+ }
+
+ if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_DFE_ENA_PROPERTY,
+ pdata->serdes_dfe_tap_ena,
+ XGBE_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_DFE_ENA_PROPERTY);
+ goto err_io;
+ }
+ } else {
+ memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
+ sizeof(pdata->serdes_dfe_tap_ena));
+ }
+
/* Obtain device settings unique to ACPI/OF */
if (pdata->use_acpi)
ret = xgbe_acpi_support(pdata);
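Each SerDes tuning property above follows the same shape: an array with one entry per supported speed (1G, 2.5G, 10G), read from the device when present and otherwise filled from built-in defaults. A condensed sketch of that pattern; XGBE_SPEEDS and the default values below are assumptions for this demo:

#include <stdio.h>
#include <string.h>

#define XGBE_SPEEDS 3   /* 1G, 2.5G, 10G */

static const unsigned int default_blwc[XGBE_SPEEDS] = { 1, 1, 0 };

int main(void)
{
        unsigned int serdes_blwc[XGBE_SPEEDS];
        int property_present = 0;       /* pretend the DT lacks it */

        if (property_present) {
                /* device_property_read_u32_array() would fill this */
        } else {
                memcpy(serdes_blwc, default_blwc, sizeof(serdes_blwc));
        }

        printf("BLWC: 1G=%u 2.5G=%u 10G=%u\n",
               serdes_blwc[0], serdes_blwc[1], serdes_blwc[2]);
        return 0;
}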
@@ -382,17 +653,23 @@ static int xgbe_probe(struct platform_device *pdev)
}
pdata->dev_irq = ret;
+ /* Get the auto-negotiation interrupt */
+ ret = platform_get_irq(phy_pdev, phy_irqnum++);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq phy 0 failed\n");
+ goto err_io;
+ }
+ pdata->an_irq = ret;
+
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
/* Set all the function pointers */
xgbe_init_all_fptrs(pdata);
- hw_if = &pdata->hw_if;
- desc_if = &pdata->desc_if;
/* Issue software reset to device */
- hw_if->exit(pdata);
+ pdata->hw_if.exit(pdata);
/* Populate the hardware features */
xgbe_get_all_hw_features(pdata);
@@ -401,8 +678,6 @@ static int xgbe_probe(struct platform_device *pdev)
xgbe_default_config(pdata);
/* Set the DMA mask */
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
ret = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(pdata->hw_feat.dma_width));
if (ret) {
@@ -447,16 +722,8 @@ static int xgbe_probe(struct platform_device *pdev)
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
- /* Prepare to register with MDIO */
- pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
- if (!pdata->mii_bus_id) {
- dev_err(dev, "failed to allocate mii bus id\n");
- ret = -ENOMEM;
- goto err_io;
- }
- ret = xgbe_mdio_register(pdata);
- if (ret)
- goto err_bus_id;
+ /* Call MDIO/PHY initialization routine */
+ pdata->phy_if.phy_init(pdata);
/* Set device operations */
netdev->netdev_ops = xgbe_get_netdev_ops();
@@ -501,26 +768,52 @@ static int xgbe_probe(struct platform_device *pdev)
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "net device registration failed\n");
- goto err_reg_netdev;
+ goto err_io;
+ }
+
+ /* Create the PHY/ANEG name based on netdev name */
+ snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
+ netdev_name(netdev));
+
+ /* Create workqueues */
+ pdata->dev_workqueue =
+ create_singlethread_workqueue(netdev_name(netdev));
+ if (!pdata->dev_workqueue) {
+ netdev_err(netdev, "device workqueue creation failed\n");
+ ret = -ENOMEM;
+ goto err_netdev;
+ }
+
+ pdata->an_workqueue =
+ create_singlethread_workqueue(pdata->an_name);
+ if (!pdata->an_workqueue) {
+ netdev_err(netdev, "phy workqueue creation failed\n");
+ ret = -ENOMEM;
+ goto err_wq;
}
xgbe_ptp_register(pdata);
xgbe_debugfs_init(pdata);
+ platform_device_put(phy_pdev);
+
netdev_notice(netdev, "net device enabled\n");
DBGPR("<-- xgbe_probe\n");
return 0;
-err_reg_netdev:
- xgbe_mdio_unregister(pdata);
+err_wq:
+ destroy_workqueue(pdata->dev_workqueue);
-err_bus_id:
- kfree(pdata->mii_bus_id);
+err_netdev:
+ unregister_netdev(netdev);
err_io:
+ platform_device_put(phy_pdev);
+
+err_phydev:
free_netdev(netdev);
err_alloc:
@@ -540,11 +833,13 @@ static int xgbe_remove(struct platform_device *pdev)
xgbe_ptp_unregister(pdata);
- unregister_netdev(netdev);
+ flush_workqueue(pdata->an_workqueue);
+ destroy_workqueue(pdata->an_workqueue);
- xgbe_mdio_unregister(pdata);
+ flush_workqueue(pdata->dev_workqueue);
+ destroy_workqueue(pdata->dev_workqueue);
- kfree(pdata->mii_bus_id);
+ unregister_netdev(netdev);
free_netdev(netdev);
@@ -557,16 +852,17 @@ static int xgbe_remove(struct platform_device *pdev)
static int xgbe_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
- int ret;
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret = 0;
DBGPR("-->xgbe_suspend\n");
- if (!netif_running(netdev)) {
- DBGPR("<--xgbe_dev_suspend\n");
- return -EINVAL;
- }
+ if (netif_running(netdev))
+ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
- ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+ pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
DBGPR("<--xgbe_suspend\n");
@@ -576,16 +872,16 @@ static int xgbe_suspend(struct device *dev)
static int xgbe_resume(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
- int ret;
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret = 0;
DBGPR("-->xgbe_resume\n");
- if (!netif_running(netdev)) {
- DBGPR("<--xgbe_dev_resume\n");
- return -EINVAL;
- }
+ pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
- ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+ if (netif_running(netdev))
+ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
DBGPR("<--xgbe_resume\n");
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 59e267f3f1b7..9088c3a35a20 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -119,194 +119,1246 @@
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
#include "xgbe.h"
#include "xgbe-common.h"
-static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
+static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
{
- struct xgbe_prv_data *pdata = mii->priv;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- int mmd_data;
+ unsigned int reg;
- DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
- prtad, mmd_reg);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
+ reg |= XGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void xgbe_an_disable_kr_training(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
- DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- return mmd_data;
+ reg &= ~XGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
}
-static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
- u16 mmd_val)
+static void xgbe_pcs_power_cycle(struct xgbe_prv_data *pdata)
{
- struct xgbe_prv_data *pdata = mii->priv;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- int mmd_data = mmd_val;
+ unsigned int reg;
- DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
- prtad, mmd_reg, mmd_data);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
+ reg |= MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
- DBGPR_MDIO("<--xgbe_mdio_write\n");
+ usleep_range(75, 100);
- return 0;
+ reg &= ~MDIO_CTRL1_LPOWER;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
}
-void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+static void xgbe_serdes_start_ratechange(struct xgbe_prv_data *pdata)
{
- struct device *dev = pdata->dev;
- struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
- int i;
-
- dev_alert(dev, "\n************* PHY Reg dump **********************\n");
-
- dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
- dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
- dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
- dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
- dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
- dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
-
- dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
- dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
- dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
- dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE + 1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
- dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE + 2,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
- dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
- MDIO_AN_COMP_STAT,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
-
- dev_alert(dev, "MMD Device Mask = %#x\n",
- phydev->c45_ids.devices_in_package);
- for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
- dev_alert(dev, " MMD %d: ID = %#08x\n", i,
- phydev->c45_ids.device_ids[i]);
-
- dev_alert(dev, "\n*************************************************\n");
-}
-
-int xgbe_mdio_register(struct xgbe_prv_data *pdata)
-{
- struct mii_bus *mii;
- struct phy_device *phydev;
- int ret = 0;
-
- DBGPR("-->xgbe_mdio_register\n");
-
- mii = mdiobus_alloc();
- if (!mii) {
- dev_err(pdata->dev, "mdiobus_alloc failed\n");
- return -ENOMEM;
+ /* Assert Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
+}
+
+static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
+{
+ unsigned int wait;
+ u16 status;
+
+ /* Release Rx and Tx ratechange */
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
+
+ /* Wait for Rx and Tx ready */
+ wait = XGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ usleep_range(50, 75);
+
+ status = XSIR0_IOREAD(pdata, SIR0_STATUS);
+ if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+ XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+ goto rx_reset;
}
- /* Register on the MDIO bus (don't probe any PHYs) */
- mii->name = XGBE_PHY_NAME;
- mii->read = xgbe_mdio_read;
- mii->write = xgbe_mdio_write;
- snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
- mii->priv = pdata;
- mii->phy_mask = ~0;
- mii->parent = pdata->dev;
- ret = mdiobus_register(mii);
- if (ret) {
- dev_err(pdata->dev, "mdiobus_register failed\n");
- goto err_mdiobus_alloc;
+ netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
+ status);
+
+rx_reset:
+ /* Perform Rx reset for the DFE changes */
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
+}
+
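
xgbe_serdes_complete_ratechange() is a bounded poll: it releases the rate-change request, then retries up to XGBE_RATECHANGE_COUNT (500) times, sleeping 50-75 us per iteration, before giving up with a debug message and performing the Rx reset regardless. The same shape in isolation; xgbe_serdes_ready() here is a hypothetical stand-in for the SIR0_STATUS RX_READY/TX_READY test:

	static bool example_wait_serdes_ready(struct xgbe_prv_data *pdata)
	{
		unsigned int wait = XGBE_RATECHANGE_COUNT;

		while (wait--) {
			usleep_range(50, 75);
			if (xgbe_serdes_ready(pdata))	/* hypothetical */
				return true;
		}

		return false;	/* caller logs it and resets Rx anyway */
	}
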
+static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Enable KR training */
+ xgbe_an_enable_kr_training(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_xgmii_speed(pdata);
+
+ /* Set PCS to KR/10G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBR;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED10G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 10G speed */
+ xgbe_serdes_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ pdata->serdes_cdr_rate[XGBE_SPEED_10000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ pdata->serdes_tx_amp[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ pdata->serdes_blwc[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ pdata->serdes_pq_skew[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ pdata->serdes_dfe_tap_cfg[XGBE_SPEED_10000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
+
+ xgbe_serdes_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
+}
+
+static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Disable KR training */
+ xgbe_an_disable_kr_training(pdata);
+
+ /* Set MAC to 2.5G speed */
+ pdata->hw_if.set_gmii_2500_speed(pdata);
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 2.5G speed */
+ xgbe_serdes_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ pdata->serdes_cdr_rate[XGBE_SPEED_2500]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ pdata->serdes_tx_amp[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ pdata->serdes_blwc[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ pdata->serdes_pq_skew[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ pdata->serdes_dfe_tap_cfg[XGBE_SPEED_2500]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
+
+ xgbe_serdes_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
+}
+
+static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Disable KR training */
+ xgbe_an_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_gmii_speed(pdata);
+
+ /* Set PCS to KX/1G speed */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ reg &= ~MDIO_PCS_CTRL2_TYPE;
+ reg |= MDIO_PCS_CTRL2_10GBX;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg &= ~MDIO_CTRL1_SPEEDSEL;
+ reg |= MDIO_CTRL1_SPEED1G;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ xgbe_pcs_power_cycle(pdata);
+
+ /* Set SerDes to 1G speed */
+ xgbe_serdes_start_ratechange(pdata);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
+
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+ pdata->serdes_cdr_rate[XGBE_SPEED_1000]);
+ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+ pdata->serdes_tx_amp[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+ pdata->serdes_blwc[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+ pdata->serdes_pq_skew[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+ pdata->serdes_dfe_tap_cfg[XGBE_SPEED_1000]);
+ XRXTX_IOWRITE(pdata, RXTX_REG22,
+ pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
+
+ xgbe_serdes_complete_ratechange(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
+}
+
+static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode *mode)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+ if ((reg & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
+ *mode = XGBE_MODE_KR;
+ else
+ *mode = XGBE_MODE_KX;
+}
+
+static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mode mode;
+
+ xgbe_cur_mode(pdata, &mode);
+
+ return (mode == XGBE_MODE_KR);
+}
+
+static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
+{
+	/* If we are in KR, switch to KX, and vice versa */
+ if (xgbe_in_kr_mode(pdata)) {
+ if (pdata->speed_set == XGBE_SPEEDSET_1000_10000)
+ xgbe_gmii_mode(pdata);
+ else
+ xgbe_gmii_2500_mode(pdata);
+ } else {
+ xgbe_xgmii_mode(pdata);
}
- DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
-
- /* Probe the PCS using Clause 45 */
- phydev = get_phy_device(mii, XGBE_PRTAD, true);
- if (IS_ERR(phydev) || !phydev ||
- !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
- dev_err(pdata->dev, "get_phy_device failed\n");
- ret = phydev ? PTR_ERR(phydev) : -ENOLINK;
- goto err_mdiobus_register;
+}
+
+static void xgbe_set_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ enum xgbe_mode cur_mode;
+
+ xgbe_cur_mode(pdata, &cur_mode);
+ if (mode != cur_mode)
+ xgbe_switch_mode(pdata);
+}
+
+static bool xgbe_use_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ return true;
+ } else {
+ if (pdata->phy.speed == SPEED_10000)
+ return true;
}
- request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
- MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
- ret = phy_device_register(phydev);
- if (ret) {
- dev_err(pdata->dev, "phy_device_register failed\n");
- goto err_phy_device;
+ return false;
+}
+
+static bool xgbe_use_gmii_2500_mode(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
+ return true;
+ } else {
+ if (pdata->phy.speed == SPEED_2500)
+ return true;
}
- if (!phydev->dev.driver) {
- dev_err(pdata->dev, "phy driver probe failed\n");
- ret = -EIO;
- goto err_phy_device;
+
+ return false;
+}
+
+static bool xgbe_use_gmii_mode(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
+ return true;
+ } else {
+ if (pdata->phy.speed == SPEED_1000)
+ return true;
}
- /* Add a reference to the PHY driver so it can't be unloaded */
- pdata->phy_module = phydev->dev.driver->owner;
- if (!try_module_get(pdata->phy_module)) {
- dev_err(pdata->dev, "try_module_get failed\n");
- ret = -EIO;
- goto err_phy_device;
+ return false;
+}
+
+static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+
+ if (enable)
+ reg |= MDIO_AN_CTRL1_ENABLE;
+
+ if (restart)
+ reg |= MDIO_AN_CTRL1_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
+static void xgbe_restart_an(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_an(pdata, true, true);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN enabled/restarted\n");
+}
+
+static void xgbe_disable_an(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_an(pdata, false, false);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN disabled\n");
+}
+
+static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg, reg;
+
+ *state = XGBE_RX_COMPLETE;
+
+ /* If we're not in KR mode then we're done */
+ if (!xgbe_in_kr_mode(pdata))
+ return XGBE_AN_PAGE_RECEIVED;
+
+ /* Enable/Disable FEC */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL);
+ reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE);
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+ reg |= pdata->fec_ability;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
+
+ /* Start KR training */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (reg & XGBE_KR_TRAINING_ENABLE) {
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
+
+ reg |= XGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ reg);
+
+ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "KR training initiated\n");
}
- pdata->mii = mii;
- pdata->mdio_mmd = MDIO_MMD_PCS;
+ return XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ u16 msg;
+
+ *state = XGBE_RX_XNP;
+
+ msg = XGBE_XNP_MCF_NULL_MESSAGE;
+ msg |= XGBE_XNP_MP_FORMATTED;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+ return XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int link_support;
+ unsigned int reg, ad_reg, lp_reg;
+
+ /* Read Base Ability register 2 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+
+ /* Check for a supported mode, otherwise restart in a different one */
+ link_support = xgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
+ if (!(reg & link_support))
+ return XGBE_AN_INCOMPAT_LINK;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+
+ return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & XGBE_XNP_NP_EXCHANGE))
+ ? xgbe_an_tx_xnp(pdata, state)
+ : xgbe_an_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
+ enum xgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
+
+ return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & XGBE_XNP_NP_EXCHANGE))
+ ? xgbe_an_tx_xnp(pdata, state)
+ : xgbe_an_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_rx *state;
+ unsigned long an_timeout;
+ enum xgbe_an ret;
+
+ if (!pdata->an_start) {
+ pdata->an_start = jiffies;
+ } else {
+ an_timeout = pdata->an_start +
+ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+ if (time_after(jiffies, an_timeout)) {
+ /* Auto-negotiation timed out, reset state */
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+
+ pdata->an_start = jiffies;
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "AN timed out, resetting state\n");
+ }
+ }
- phydev->autoneg = pdata->default_autoneg;
- if (phydev->autoneg == AUTONEG_DISABLE) {
- phydev->speed = pdata->default_speed;
- phydev->duplex = DUPLEX_FULL;
+ state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state
+ : &pdata->kx_state;
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ switch (*state) {
+ case XGBE_RX_BPA:
+ ret = xgbe_an_rx_bpa(pdata, state);
+ break;
+
+ case XGBE_RX_XNP:
+ ret = xgbe_an_rx_xnp(pdata, state);
+ break;
+
+ default:
+ ret = XGBE_AN_ERROR;
+ }
+
+ return ret;
+}
+
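
The timeout bookkeeping in xgbe_an_page_received() is plain jiffies arithmetic; time_after() is used instead of a direct comparison so that jiffies wraparound is handled correctly. Reduced to its essentials:

	unsigned long an_timeout = pdata->an_start +
				   msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);

	if (time_after(jiffies, an_timeout)) {
		/* more than 500 ms since an_start: reset kr/kx state */
	}
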
+static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
+{
+ /* Be sure we aren't looping trying to negotiate */
+ if (xgbe_in_kr_mode(pdata)) {
+ pdata->kr_state = XGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
+ !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ return XGBE_AN_NO_LINK;
+
+ if (pdata->kx_state != XGBE_RX_BPA)
+ return XGBE_AN_NO_LINK;
+ } else {
+ pdata->kx_state = XGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
+ return XGBE_AN_NO_LINK;
+
+ if (pdata->kr_state != XGBE_RX_BPA)
+ return XGBE_AN_NO_LINK;
+ }
+
+ xgbe_disable_an(pdata);
+
+ xgbe_switch_mode(pdata);
+
+ xgbe_restart_an(pdata);
+
+ return XGBE_AN_INCOMPAT_LINK;
+}
+
+static irqreturn_t xgbe_an_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+ netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
+
+ /* Interrupt reason must be read and cleared outside of IRQ context */
+ disable_irq_nosync(pdata->an_irq);
+
+ queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void xgbe_an_irq_work(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ an_irq_work);
+
+ /* Avoid a race between enabling the IRQ and exiting the work by
+ * waiting for the work to finish and then queueing it
+ */
+ flush_work(&pdata->an_work);
+ queue_work(pdata->an_workqueue, &pdata->an_work);
+}
+
+static const char *xgbe_state_as_string(enum xgbe_an state)
+{
+ switch (state) {
+ case XGBE_AN_READY:
+ return "Ready";
+ case XGBE_AN_PAGE_RECEIVED:
+ return "Page-Received";
+ case XGBE_AN_INCOMPAT_LINK:
+ return "Incompatible-Link";
+ case XGBE_AN_COMPLETE:
+ return "Complete";
+ case XGBE_AN_NO_LINK:
+ return "No-Link";
+ case XGBE_AN_ERROR:
+ return "Error";
+ default:
+ return "Undefined";
+ }
+}
+
+static void xgbe_an_state_machine(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ an_work);
+ enum xgbe_an cur_state = pdata->an_state;
+ unsigned int int_reg, int_mask;
+
+ mutex_lock(&pdata->an_mutex);
+
+ /* Read the interrupt */
+ int_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
+ if (!int_reg)
+ goto out;
+
+next_int:
+ if (int_reg & XGBE_AN_PG_RCV) {
+ pdata->an_state = XGBE_AN_PAGE_RECEIVED;
+ int_mask = XGBE_AN_PG_RCV;
+ } else if (int_reg & XGBE_AN_INC_LINK) {
+ pdata->an_state = XGBE_AN_INCOMPAT_LINK;
+ int_mask = XGBE_AN_INC_LINK;
+ } else if (int_reg & XGBE_AN_INT_CMPLT) {
+ pdata->an_state = XGBE_AN_COMPLETE;
+ int_mask = XGBE_AN_INT_CMPLT;
+ } else {
+ pdata->an_state = XGBE_AN_ERROR;
+ int_mask = 0;
}
- pdata->phydev = phydev;
+ /* Clear the interrupt to be processed */
+ int_reg &= ~int_mask;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
+
+ pdata->an_result = pdata->an_state;
+
+again:
+ netif_dbg(pdata, link, pdata->netdev, "AN %s\n",
+ xgbe_state_as_string(pdata->an_state));
+
+ cur_state = pdata->an_state;
+
+ switch (pdata->an_state) {
+ case XGBE_AN_READY:
+ pdata->an_supported = 0;
+ break;
+
+ case XGBE_AN_PAGE_RECEIVED:
+ pdata->an_state = xgbe_an_page_received(pdata);
+ pdata->an_supported++;
+ break;
+
+ case XGBE_AN_INCOMPAT_LINK:
+ pdata->an_supported = 0;
+ pdata->parallel_detect = 0;
+ pdata->an_state = xgbe_an_incompat_link(pdata);
+ break;
- DBGPHY_REGS(pdata);
+ case XGBE_AN_COMPLETE:
+ pdata->parallel_detect = pdata->an_supported ? 0 : 1;
+ netif_dbg(pdata, link, pdata->netdev, "%s successful\n",
+ pdata->an_supported ? "Auto negotiation"
+ : "Parallel detection");
+ break;
- DBGPR("<--xgbe_mdio_register\n");
+ case XGBE_AN_NO_LINK:
+ break;
+
+ default:
+ pdata->an_state = XGBE_AN_ERROR;
+ }
+
+ if (pdata->an_state == XGBE_AN_NO_LINK) {
+ int_reg = 0;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ } else if (pdata->an_state == XGBE_AN_ERROR) {
+ netdev_err(pdata->netdev,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
+
+ int_reg = 0;
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ }
+
+ if (pdata->an_state >= XGBE_AN_COMPLETE) {
+ pdata->an_result = pdata->an_state;
+ pdata->an_state = XGBE_AN_READY;
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+ pdata->an_start = 0;
+
+ netif_dbg(pdata, link, pdata->netdev, "AN result: %s\n",
+ xgbe_state_as_string(pdata->an_result));
+ }
+
+ if (cur_state != pdata->an_state)
+ goto again;
+
+ if (int_reg)
+ goto next_int;
+
+out:
+ enable_irq(pdata->an_irq);
+
+ mutex_unlock(&pdata->an_mutex);
+}
+
+static void xgbe_an_init(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+ /* Set up Advertisement register 3 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (pdata->phy.advertising & ADVERTISED_10000baseR_FEC)
+ reg |= 0xc000;
+ else
+ reg &= ~0xc000;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
+
+ /* Set up Advertisement register 2 next */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+ if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+ (pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ reg |= 0x20;
+ else
+ reg &= ~0x20;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg);
+
+ /* Set up Advertisement register 1 last */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (pdata->phy.advertising & ADVERTISED_Pause)
+ reg |= 0x400;
+ else
+ reg &= ~0x400;
+
+ if (pdata->phy.advertising & ADVERTISED_Asym_Pause)
+ reg |= 0x800;
+ else
+ reg &= ~0x800;
+
+ /* We don't intend to perform XNP */
+ reg &= ~XGBE_XNP_NP_EXCHANGE;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN initialized\n");
+}
+
+static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
+{
+ if (pdata->tx_pause && pdata->rx_pause)
+ return "rx/tx";
+ else if (pdata->rx_pause)
+ return "rx";
+ else if (pdata->tx_pause)
+ return "tx";
+ else
+ return "off";
+}
+
+static const char *xgbe_phy_speed_string(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return "1Gbps";
+ case SPEED_2500:
+ return "2.5Gbps";
+ case SPEED_10000:
+ return "10Gbps";
+ case SPEED_UNKNOWN:
+ return "Unknown";
+ default:
+ return "Unsupported";
+ }
+}
+
+static void xgbe_phy_print_status(struct xgbe_prv_data *pdata)
+{
+ if (pdata->phy.link)
+ netdev_info(pdata->netdev,
+ "Link is Up - %s/%s - flow control %s\n",
+ xgbe_phy_speed_string(pdata->phy.speed),
+ pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half",
+ xgbe_phy_fc_string(pdata));
+ else
+ netdev_info(pdata->netdev, "Link is Down\n");
+}
+
+static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
+{
+ int new_state = 0;
+
+ if (pdata->phy.link) {
+ /* Flow control support */
+ pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+ if (pdata->tx_pause != pdata->phy.tx_pause) {
+ new_state = 1;
+ pdata->hw_if.config_tx_flow_control(pdata);
+ pdata->tx_pause = pdata->phy.tx_pause;
+ }
+
+ if (pdata->rx_pause != pdata->phy.rx_pause) {
+ new_state = 1;
+ pdata->hw_if.config_rx_flow_control(pdata);
+ pdata->rx_pause = pdata->phy.rx_pause;
+ }
+
+ /* Speed support */
+ if (pdata->phy_speed != pdata->phy.speed) {
+ new_state = 1;
+ pdata->phy_speed = pdata->phy.speed;
+ }
+
+ if (pdata->phy_link != pdata->phy.link) {
+ new_state = 1;
+ pdata->phy_link = pdata->phy.link;
+ }
+ } else if (pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state && netif_msg_link(pdata))
+ xgbe_phy_print_status(pdata);
+}
+
+static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+{
+ netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
+
+ /* Disable auto-negotiation */
+ xgbe_disable_an(pdata);
+
+ /* Validate/Set specified speed */
+ switch (pdata->phy.speed) {
+ case SPEED_10000:
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ break;
+
+ case SPEED_2500:
+ case SPEED_1000:
+ xgbe_set_mode(pdata, XGBE_MODE_KX);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Validate duplex mode */
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+ set_bit(XGBE_LINK_INIT, &pdata->dev_state);
+ pdata->link_check = jiffies;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE)
+ return xgbe_phy_config_fixed(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
+
+ /* Disable auto-negotiation interrupt */
+ disable_irq(pdata->an_irq);
+
+ /* Start auto-negotiation in a supported mode */
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ } else if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+ (pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KX);
+ } else {
+ enable_irq(pdata->an_irq);
+ return -EINVAL;
+ }
+
+ /* Disable and stop any in progress auto-negotiation */
+ xgbe_disable_an(pdata);
+
+	/* Clear any auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ pdata->an_result = XGBE_AN_READY;
+ pdata->an_state = XGBE_AN_READY;
+ pdata->kr_state = XGBE_RX_BPA;
+ pdata->kx_state = XGBE_RX_BPA;
+
+ /* Re-enable auto-negotiation interrupt */
+ enable_irq(pdata->an_irq);
+
+ /* Set up advertisement registers based on current settings */
+ xgbe_an_init(pdata);
+
+ /* Enable and start auto-negotiation */
+ xgbe_restart_an(pdata);
return 0;
+}
+
+static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ mutex_lock(&pdata->an_mutex);
+
+ ret = __xgbe_phy_config_aneg(pdata);
+ if (ret)
+ set_bit(XGBE_LINK_ERR, &pdata->dev_state);
+ else
+ clear_bit(XGBE_LINK_ERR, &pdata->dev_state);
+
+ mutex_unlock(&pdata->an_mutex);
+
+ return ret;
+}
+
+static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+{
+ return (pdata->an_result == XGBE_AN_COMPLETE);
+}
+
+static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+{
+ unsigned long link_timeout;
+
+ link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
+ if (time_after(jiffies, link_timeout)) {
+ netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+ xgbe_phy_config_aneg(pdata);
+ }
+}
+
+static void xgbe_phy_status_force(struct xgbe_prv_data *pdata)
+{
+ if (xgbe_in_kr_mode(pdata)) {
+ pdata->phy.speed = SPEED_10000;
+ } else {
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ }
+ }
+ pdata->phy.duplex = DUPLEX_FULL;
+}
+
+static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
+{
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising = 0;
+
+ if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+ return xgbe_phy_status_force(pdata);
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+ pdata->phy.lp_advertising |= ADVERTISED_Pause;
+ if (lp_reg & 0x800)
+ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20) {
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
+ break;
+ }
+ }
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ pdata->phy.speed = SPEED_10000;
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+ } else if (ad_reg & 0x20) {
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ }
+
+ xgbe_set_mode(pdata, XGBE_MODE_KX);
+ } else {
+ pdata->phy.speed = SPEED_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ pdata->phy.duplex = DUPLEX_FULL;
+}
+
+static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg, link_aneg;
+
+ if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
+ if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
+ netif_carrier_off(pdata->netdev);
+
+ pdata->phy.link = 0;
+ goto adjust_link;
+ }
+
+ link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+ /* Get the link status. Link status is latched low, so read
+ * once to clear and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ pdata->phy.link = (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+ if (pdata->phy.link) {
+ if (link_aneg && !xgbe_phy_aneg_done(pdata)) {
+ xgbe_check_link_timeout(pdata);
+ return;
+ }
+
+ xgbe_phy_status_aneg(pdata);
+
+ if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+ clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
+
+ if (!test_bit(XGBE_LINK, &pdata->dev_state)) {
+ set_bit(XGBE_LINK, &pdata->dev_state);
+ netif_carrier_on(pdata->netdev);
+ }
+ } else {
+ if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+ xgbe_check_link_timeout(pdata);
+
+ if (link_aneg)
+ return;
+ }
+
+ xgbe_phy_status_aneg(pdata);
+
+ if (test_bit(XGBE_LINK, &pdata->dev_state)) {
+ clear_bit(XGBE_LINK, &pdata->dev_state);
+ netif_carrier_off(pdata->netdev);
+ }
+ }
+
+adjust_link:
+ xgbe_phy_adjust_link(pdata);
+}
+
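
xgbe_phy_status() reads MDIO_STAT1 twice because the link-status bit is latched low: the first read returns (and clears) the latched value, the second reflects the current state. The pattern on its own:

	/* first read clears the latch, second read is current state */
	(void)XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
	pdata->phy.link = (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
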
+static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+ netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
+
+ /* Disable auto-negotiation */
+ xgbe_disable_an(pdata);
+
+ /* Disable auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
-err_phy_device:
- phy_device_free(phydev);
+ pdata->phy.link = 0;
+ if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
+ netif_carrier_off(pdata->netdev);
-err_mdiobus_register:
- mdiobus_unregister(mii);
+ xgbe_phy_adjust_link(pdata);
+}
+
+static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ int ret;
+
+ netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
+
+ ret = devm_request_irq(pdata->dev, pdata->an_irq,
+ xgbe_an_isr, 0, pdata->an_name,
+ pdata);
+ if (ret) {
+ netdev_err(netdev, "phy irq request failed\n");
+ return ret;
+ }
+
+ /* Set initial mode - call the mode setting routines
+	 * directly to ensure we are properly configured
+ */
+ if (xgbe_use_xgmii_mode(pdata)) {
+ xgbe_xgmii_mode(pdata);
+ } else if (xgbe_use_gmii_mode(pdata)) {
+ xgbe_gmii_mode(pdata);
+ } else if (xgbe_use_gmii_2500_mode(pdata)) {
+ xgbe_gmii_2500_mode(pdata);
+ } else {
+ ret = -EINVAL;
+ goto err_irq;
+ }
+
+ /* Set up advertisement registers based on current settings */
+ xgbe_an_init(pdata);
+
+ /* Enable auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
-err_mdiobus_alloc:
- mdiobus_free(mii);
+ return xgbe_phy_config_aneg(pdata);
+
+err_irq:
+ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
return ret;
}
-void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+ unsigned int count, reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ reg |= MDIO_CTRL1_RESET;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+ count = 50;
+ do {
+ msleep(20);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+ } while ((reg & MDIO_CTRL1_RESET) && --count);
+
+ if (reg & MDIO_CTRL1_RESET)
+ return -ETIMEDOUT;
+
+ /* Disable auto-negotiation for now */
+ xgbe_disable_an(pdata);
+
+ /* Clear auto-negotiation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+ return 0;
+}
+
+static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
{
- DBGPR("-->xgbe_mdio_unregister\n");
+ struct device *dev = pdata->dev;
+
+ dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
+
+ dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+ dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+ dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+ dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+ dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+ dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+ dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+ dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+ dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+ dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+ dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 2,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+ dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
+ MDIO_AN_COMP_STAT,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+ dev_dbg(dev, "\n*************************************************\n");
+}
+
+static void xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ mutex_init(&pdata->an_mutex);
+ INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
+ INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ /* Initialize supported features */
+ pdata->phy.supported = SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+ switch (pdata->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+ break;
+ case XGBE_SPEEDSET_2500_10000:
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ break;
+ }
- pdata->phydev = NULL;
+ pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBR_FECABLE);
+ pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
+ MDIO_PMA_10GBR_FECABLE_ERRABLE);
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
- module_put(pdata->phy_module);
- pdata->phy_module = NULL;
+ pdata->phy.advertising = pdata->phy.supported;
- mdiobus_unregister(pdata->mii);
- pdata->mii->priv = NULL;
+ pdata->phy.address = 0;
+
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+
+ pdata->phy.link = 0;
+
+ pdata->phy.pause_autoneg = pdata->pause_autoneg;
+ pdata->phy.tx_pause = pdata->tx_pause;
+ pdata->phy.rx_pause = pdata->rx_pause;
+
+ /* Fix up Flow Control advertising */
+ pdata->phy.advertising &= ~ADVERTISED_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
+
+ if (pdata->rx_pause) {
+ pdata->phy.advertising |= ADVERTISED_Pause;
+ pdata->phy.advertising |= ADVERTISED_Asym_Pause;
+ }
+
+ if (pdata->tx_pause)
+ pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+
+ if (netif_msg_drv(pdata))
+ xgbe_dump_phy_registers(pdata);
+}
+
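
The flow-control fixup at the end of xgbe_phy_init() implements the usual 802.3 pause-advertisement encoding: rx_pause sets both Pause and Asym_Pause, then tx_pause toggles Asym_Pause. A sketch of the net effect (example_pause_advertising() is illustrative only):

	static u32 example_pause_advertising(int rx_pause, int tx_pause)
	{
		u32 adv = 0;

		if (rx_pause)
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
		if (tx_pause)
			adv ^= ADVERTISED_Asym_Pause;

		/* rx tx -> advertised
		 *  0  0    (none)
		 *  0  1    Asym_Pause
		 *  1  0    Pause | Asym_Pause
		 *  1  1    Pause
		 */
		return adv;
	}
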
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
+{
+ phy_if->phy_init = xgbe_phy_init;
- mdiobus_free(pdata->mii);
- pdata->mii = NULL;
+ phy_if->phy_reset = xgbe_phy_reset;
+ phy_if->phy_start = xgbe_phy_start;
+ phy_if->phy_stop = xgbe_phy_stop;
- DBGPR("<--xgbe_mdio_unregister\n");
+ phy_if->phy_status = xgbe_phy_status;
+ phy_if->phy_config_aneg = xgbe_phy_config_aneg;
}
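
xgbe_init_function_ptrs_phy() fills in the xgbe_phy_if vtable declared in the header diff below; the driver then calls through pdata->phy_if rather than the removed xgbe_mdio_register()/xgbe_mdio_unregister() pair, as the probe hunk earlier already shows for phy_init. A sketch of the consuming side; the reset/start call sites are not part of this section and are assumed to live in the device up/down paths:

	xgbe_init_function_ptrs_phy(&pdata->phy_if);
	pdata->phy_if.phy_init(pdata);			/* at probe */

	ret = pdata->phy_if.phy_reset(pdata);		/* bring-up */
	if (!ret)
		ret = pdata->phy_if.phy_start(pdata);
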
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index e62dfa2deab6..63d72a140053 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -129,7 +129,7 @@
#include <net/dcbnl.h>
#define XGBE_DRV_NAME "amd-xgbe"
-#define XGBE_DRV_VERSION "1.0.0-a"
+#define XGBE_DRV_VERSION "1.0.2"
#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
/* Descriptor related defines */
@@ -178,14 +178,17 @@
#define XGMAC_JUMBO_PACKET_MTU 9000
#define XGMAC_MAX_JUMBO_PACKET 9018
-/* MDIO bus phy name */
-#define XGBE_PHY_NAME "amd_xgbe_phy"
-#define XGBE_PRTAD 0
-
/* Common property names */
#define XGBE_MAC_ADDR_PROPERTY "mac-address"
#define XGBE_PHY_MODE_PROPERTY "phy-mode"
#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
+#define XGBE_SPEEDSET_PROPERTY "amd,speed-set"
+#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
+#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
+#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
+#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
+#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
/* Device-tree clock names */
#define XGBE_DMA_CLOCK "dma_clk"
@@ -241,6 +244,49 @@
#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
#define XGBE_RSS_HASH_KEY_TYPE 1
+/* Auto-negotiation */
+#define XGBE_AN_MS_TIMEOUT 500
+#define XGBE_LINK_TIMEOUT 10
+
+#define XGBE_AN_INT_CMPLT 0x01
+#define XGBE_AN_INC_LINK 0x02
+#define XGBE_AN_PG_RCV 0x04
+#define XGBE_AN_INT_MASK 0x07
+
+/* Rate-change complete wait/retry count */
+#define XGBE_RATECHANGE_COUNT 500
+
+/* Default SerDes settings */
+#define XGBE_SPEED_10000_BLWC 0
+#define XGBE_SPEED_10000_CDR 0x7
+#define XGBE_SPEED_10000_PLL 0x1
+#define XGBE_SPEED_10000_PQ 0x12
+#define XGBE_SPEED_10000_RATE 0x0
+#define XGBE_SPEED_10000_TXAMP 0xa
+#define XGBE_SPEED_10000_WORD 0x7
+#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
+#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
+
+#define XGBE_SPEED_2500_BLWC 1
+#define XGBE_SPEED_2500_CDR 0x2
+#define XGBE_SPEED_2500_PLL 0x0
+#define XGBE_SPEED_2500_PQ 0xa
+#define XGBE_SPEED_2500_RATE 0x1
+#define XGBE_SPEED_2500_TXAMP 0xf
+#define XGBE_SPEED_2500_WORD 0x1
+#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
+
+#define XGBE_SPEED_1000_BLWC 1
+#define XGBE_SPEED_1000_CDR 0x2
+#define XGBE_SPEED_1000_PLL 0x0
+#define XGBE_SPEED_1000_PQ 0xa
+#define XGBE_SPEED_1000_RATE 0x3
+#define XGBE_SPEED_1000_TXAMP 0xf
+#define XGBE_SPEED_1000_WORD 0x1
+#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
+
struct xgbe_prv_data;
struct xgbe_packet_data {
@@ -334,8 +380,6 @@ struct xgbe_ring_data {
*/
unsigned int state_saved;
struct {
- unsigned int incomplete;
- unsigned int context_next;
struct sk_buff *skb;
unsigned int len;
unsigned int error;
@@ -414,6 +458,13 @@ struct xgbe_channel {
struct xgbe_ring *rx_ring;
} ____cacheline_aligned;
+enum xgbe_state {
+ XGBE_DOWN,
+ XGBE_LINK,
+ XGBE_LINK_INIT,
+ XGBE_LINK_ERR,
+};
+
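
dev_state holds xgbe_state flags in an unsigned long and is manipulated with the kernel's atomic bitops, exactly as the xgbe-mdio.c hunks above do:

	set_bit(XGBE_LINK_INIT, &pdata->dev_state);

	if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
		netif_carrier_off(pdata->netdev);
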
enum xgbe_int {
XGMAC_INT_DMA_CH_SR_TI,
XGMAC_INT_DMA_CH_SR_TPS,
@@ -445,6 +496,57 @@ enum xgbe_mtl_fifo_size {
XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
};
+enum xgbe_speed {
+ XGBE_SPEED_1000 = 0,
+ XGBE_SPEED_2500,
+ XGBE_SPEED_10000,
+ XGBE_SPEEDS,
+};
+
+enum xgbe_an {
+ XGBE_AN_READY = 0,
+ XGBE_AN_PAGE_RECEIVED,
+ XGBE_AN_INCOMPAT_LINK,
+ XGBE_AN_COMPLETE,
+ XGBE_AN_NO_LINK,
+ XGBE_AN_ERROR,
+};
+
+enum xgbe_rx {
+ XGBE_RX_BPA = 0,
+ XGBE_RX_XNP,
+ XGBE_RX_COMPLETE,
+ XGBE_RX_ERROR,
+};
+
+enum xgbe_mode {
+ XGBE_MODE_KR = 0,
+ XGBE_MODE_KX,
+};
+
+enum xgbe_speedset {
+ XGBE_SPEEDSET_1000_10000 = 0,
+ XGBE_SPEEDSET_2500_10000,
+};
+
+struct xgbe_phy {
+ u32 supported;
+ u32 advertising;
+ u32 lp_advertising;
+
+ int address;
+
+ int autoneg;
+ int speed;
+ int duplex;
+
+ int link;
+
+ int pause_autoneg;
+ int tx_pause;
+ int rx_pause;
+};
+
struct xgbe_mmc_stats {
/* Tx Stats */
u64 txoctetcount_gb;
@@ -492,6 +594,11 @@ struct xgbe_mmc_stats {
u64 rxwatchdogerror;
};
+struct xgbe_ext_stats {
+ u64 tx_tso_packets;
+ u64 rx_split_header_packets;
+};
+
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
@@ -591,6 +698,20 @@ struct xgbe_hw_if {
int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};
+struct xgbe_phy_if {
+ /* For initial PHY setup */
+ void (*phy_init)(struct xgbe_prv_data *);
+
+ /* For PHY support when setting device up/down */
+ int (*phy_reset)(struct xgbe_prv_data *);
+ int (*phy_start)(struct xgbe_prv_data *);
+ void (*phy_stop)(struct xgbe_prv_data *);
+
+ /* For PHY support while device is up */
+ void (*phy_status)(struct xgbe_prv_data *);
+ int (*phy_config_aneg)(struct xgbe_prv_data *);
+};
+
struct xgbe_desc_if {
int (*alloc_ring_resources)(struct xgbe_prv_data *);
void (*free_ring_resources)(struct xgbe_prv_data *);
@@ -660,6 +781,9 @@ struct xgbe_prv_data {
/* XGMAC/XPCS related mmio registers */
void __iomem *xgmac_regs; /* XGMAC CSRs */
void __iomem *xpcs_regs; /* XPCS MMD registers */
+ void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
+ void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
+ void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
/* Overall device lock */
spinlock_t lock;
@@ -670,10 +794,14 @@ struct xgbe_prv_data {
/* RSS addressing mutex */
struct mutex rss_mutex;
+ /* Flags representing xgbe_state */
+ unsigned long dev_state;
+
int dev_irq;
unsigned int per_channel_irq;
struct xgbe_hw_if hw_if;
+ struct xgbe_phy_if phy_if;
struct xgbe_desc_if desc_if;
/* AXI DMA settings */
@@ -682,6 +810,11 @@ struct xgbe_prv_data {
unsigned int arcache;
unsigned int awcache;
+ /* Service routine support */
+ struct workqueue_struct *dev_workqueue;
+ struct work_struct service_work;
+ struct timer_list service_timer;
+
/* Rings for Tx/Rx on a DMA channel */
struct xgbe_channel *channel;
unsigned int channel_count;
@@ -729,27 +862,12 @@ struct xgbe_prv_data {
u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
u32 rss_options;
- /* MDIO settings */
- struct module *phy_module;
- char *mii_bus_id;
- struct mii_bus *mii;
- int mdio_mmd;
- struct phy_device *phydev;
- int default_autoneg;
- int default_speed;
-
- /* Current PHY settings */
- phy_interface_t phy_mode;
- int phy_link;
- int phy_speed;
- unsigned int phy_tx_pause;
- unsigned int phy_rx_pause;
-
/* Netdev related settings */
unsigned char mac_addr[ETH_ALEN];
netdev_features_t netdev_features;
struct napi_struct napi;
struct xgbe_mmc_stats mmc_stats;
+ struct xgbe_ext_stats ext_stats;
/* Filtering support */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -787,6 +905,54 @@ struct xgbe_prv_data {
/* Keeps track of power mode */
unsigned int power_down;
+ /* Network interface message level setting */
+ u32 msg_enable;
+
+ /* Current PHY settings */
+ phy_interface_t phy_mode;
+ int phy_link;
+ int phy_speed;
+
+ /* MDIO/PHY related settings */
+ struct xgbe_phy phy;
+ int mdio_mmd;
+ unsigned long link_check;
+
+ char an_name[IFNAMSIZ + 32];
+ struct workqueue_struct *an_workqueue;
+
+ int an_irq;
+ struct work_struct an_irq_work;
+
+ unsigned int speed_set;
+
+ /* SerDes UEFI configurable settings.
+ * Switching between modes/speeds requires new values for some
+ * SerDes settings. The values can be supplied as device
+ * properties in array format. The first array entry is for
+ * 1GbE, second for 2.5GbE and third for 10GbE
+ */
+ u32 serdes_blwc[XGBE_SPEEDS];
+ u32 serdes_cdr_rate[XGBE_SPEEDS];
+ u32 serdes_pq_skew[XGBE_SPEEDS];
+ u32 serdes_tx_amp[XGBE_SPEEDS];
+ u32 serdes_dfe_tap_cfg[XGBE_SPEEDS];
+ u32 serdes_dfe_tap_ena[XGBE_SPEEDS];
+
+ /* Auto-negotiation state machine support */
+ struct mutex an_mutex;
+ enum xgbe_an an_result;
+ enum xgbe_an an_state;
+ enum xgbe_rx kr_state;
+ enum xgbe_rx kx_state;
+ struct work_struct an_work;
+ unsigned int an_supported;
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+ unsigned long an_start;
+
+ unsigned int lpm_ctrl; /* CTRL1 for resume */
+
#ifdef CONFIG_DEBUG_FS
struct dentry *xgbe_debugfs;
@@ -800,6 +966,7 @@ struct xgbe_prv_data {
/* Function prototypes*/
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
struct net_device_ops *xgbe_get_netdev_ops(void);
struct ethtool_ops *xgbe_get_ethtool_ops(void);
@@ -807,14 +974,11 @@ struct ethtool_ops *xgbe_get_ethtool_ops(void);
const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
#endif
-int xgbe_mdio_register(struct xgbe_prv_data *);
-void xgbe_mdio_unregister(struct xgbe_prv_data *);
-void xgbe_dump_phy_registers(struct xgbe_prv_data *);
void xgbe_ptp_register(struct xgbe_prv_data *);
void xgbe_ptp_unregister(struct xgbe_prv_data *);
-void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
- unsigned int);
-void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
+void xgbe_dump_tx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
+ unsigned int, unsigned int, unsigned int);
+void xgbe_dump_rx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
unsigned int);
void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
void xgbe_get_all_hw_features(struct xgbe_prv_data *);
@@ -831,18 +995,6 @@ static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
#endif /* CONFIG_DEBUG_FS */
-/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
-#if 0
-#define XGMAC_ENABLE_TX_DESC_DUMP
-#define XGMAC_ENABLE_RX_DESC_DUMP
-#endif
-
-/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
-#if 0
-#define XGMAC_ENABLE_TX_PKT_DUMP
-#define XGMAC_ENABLE_RX_PKT_DUMP
-#endif
-
/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
#if 0
#define YDEBUG
@@ -852,10 +1004,8 @@ static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
/* For debug prints */
#ifdef YDEBUG
#define DBGPR(x...) pr_alert(x)
-#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
#else
#define DBGPR(x...) do { } while (0)
-#define DBGPHY_REGS(x...) do { } while (0)
#endif
#ifdef YDEBUG_MDIO
diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile
index 68be565548c0..700b5abe5de5 100644
--- a/drivers/net/ethernet/apm/xgene/Makefile
+++ b/drivers/net/ethernet/apm/xgene/Makefile
@@ -3,5 +3,5 @@
#
xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \
- xgene_enet_main.o xgene_enet_ethtool.o
+ xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o
obj-$(CONFIG_NET_XGENE) += xgene-enet.o
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index b927021c6c40..25873d142b95 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -87,10 +87,11 @@ static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
int i;
xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
- for (i = 0; i < NUM_RING_CONFIG; i++) {
+ for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
ring->state[i]);
}
@@ -98,7 +99,7 @@ static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
- memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG);
+ memset(ring->state, 0, sizeof(ring->state));
xgene_enet_write_ring_state(ring);
}
@@ -141,8 +142,8 @@ static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
- struct xgene_enet_desc_ring *ring)
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+ struct xgene_enet_desc_ring *ring)
{
u32 size = ring->size;
u32 i, data;
@@ -168,7 +169,7 @@ struct xgene_enet_desc_ring *xgene_enet_setup_ring(
return ring;
}
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
u32 data;
bool is_bufpool;
@@ -186,6 +187,22 @@ out:
xgene_enet_clr_ring_state(ring);
}
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+ iowrite32(count, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+ u32 __iomem *cmd_base = ring->cmd_base;
+ u32 ring_state, num_msgs;
+
+ ring_state = ioread32(&cmd_base[1]);
+ num_msgs = GET_VAL(NUMMSGSINQ, ring_state);
+
+ return num_msgs;
+}
+
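
xgene_enet_ring_len() replaces the open-coded mask-and-shift that the xgene_enet_main.c hunk further down removes; GET_VAL(NUMMSGSINQ, ring_state) is expected to expand to the equivalent of:

	num_msgs = (ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN))
			>> NUMMSGSINQ_POS;
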
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status)
@@ -803,3 +820,12 @@ struct xgene_port_ops xgene_gport_ops = {
.cle_bypass = xgene_enet_cle_bypass,
.shutdown = xgene_gport_shutdown,
};
+
+struct xgene_ring_ops xgene_ring1_ops = {
+ .num_ring_config = NUM_RING_CONFIG,
+ .num_ring_id_shift = 6,
+ .setup = xgene_enet_setup_ring,
+ .clear = xgene_enet_clear_ring,
+ .wr_cmd = xgene_enet_wr_cmd,
+ .len = xgene_enet_ring_len,
+};
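
With xgene_ring1_ops exported, callers indirect through pdata->ring_ops instead of hard-coding first-generation ring behavior, which is precisely the conversion the xgene_enet_main.c hunks below make:

	/* before */
	iowrite32(count, ring->cmd);
	len = xgene_enet_ring_len(ring);

	/* after */
	pdata->ring_ops->wr_cmd(ring, count);
	len = pdata->ring_ops->len(ring);
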
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index d9bc89d69266..541bed056012 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -26,6 +26,7 @@
struct xgene_enet_pdata;
struct xgene_enet_stats;
+struct xgene_enet_desc_ring;
/* clears and then set bits */
static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len)
@@ -101,8 +102,8 @@ enum xgene_enet_rm {
#define BLOCK_ETH_CSR_OFFSET 0x2000
#define BLOCK_ETH_RING_IF_OFFSET 0x9000
+#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000
#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000
-
#define BLOCK_ETH_MAC_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
@@ -261,6 +262,7 @@ enum xgene_enet_ring_type {
enum xgene_ring_owner {
RING_OWNER_ETH0,
+ RING_OWNER_ETH1,
RING_OWNER_CPU = 15,
RING_OWNER_INVALID
};
@@ -314,9 +316,6 @@ static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
size / WORK_DESC_SIZE;
}
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
- struct xgene_enet_desc_ring *ring);
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring);
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status);
@@ -327,5 +326,6 @@ bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
extern struct xgene_mac_ops xgene_gmac_ops;
extern struct xgene_port_ops xgene_gport_ops;
+extern struct xgene_ring_ops xgene_ring1_ops;
#endif /* __XGENE_ENET_HW_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 40d3530d7f30..1bb317532f75 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -28,6 +28,8 @@
#define RES_RING_CSR 1
#define RES_RING_CMD 2
+static const struct of_device_id xgene_enet_of_match[];
+
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_raw_desc16 *raw_desc;
@@ -48,6 +50,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
{
struct sk_buff *skb;
struct xgene_enet_raw_desc16 *raw_desc;
+ struct xgene_enet_pdata *pdata;
struct net_device *ndev;
struct device *dev;
dma_addr_t dma_addr;
@@ -58,6 +61,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
ndev = buf_pool->ndev;
dev = ndev_to_dev(buf_pool->ndev);
+ pdata = netdev_priv(ndev);
bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
len = XGENE_ENET_MAX_MTU;
@@ -82,7 +86,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
tail = (tail + 1) & slots;
}
- iowrite32(nbuf, buf_pool->cmd);
+ pdata->ring_ops->wr_cmd(buf_pool, nbuf);
buf_pool->tail = tail;
return 0;
@@ -102,26 +106,16 @@ static u8 xgene_enet_hdr_len(const void *data)
return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}
-static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
-{
- u32 __iomem *cmd_base = ring->cmd_base;
- u32 ring_state, num_msgs;
-
- ring_state = ioread32(&cmd_base[1]);
- num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);
-
- return num_msgs >> NUMMSGSINQ_POS;
-}
-
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
struct xgene_enet_raw_desc16 *raw_desc;
u32 slots = buf_pool->slots - 1;
u32 tail = buf_pool->tail;
u32 userinfo;
int i, len;
- len = xgene_enet_ring_len(buf_pool);
+ len = pdata->ring_ops->len(buf_pool);
for (i = 0; i < len; i++) {
tail = (tail - 1) & slots;
raw_desc = &buf_pool->raw_desc16[tail];
@@ -131,7 +125,7 @@ static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
}
- iowrite32(-len, buf_pool->cmd);
+ pdata->ring_ops->wr_cmd(buf_pool, -len);
buf_pool->tail = tail;
}
@@ -263,8 +257,8 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
u32 tx_level, cq_level;
- tx_level = xgene_enet_ring_len(tx_ring);
- cq_level = xgene_enet_ring_len(cp_ring);
+ tx_level = pdata->ring_ops->len(tx_ring);
+ cq_level = pdata->ring_ops->len(cp_ring);
if (unlikely(tx_level > pdata->tx_qcnt_hi ||
cq_level > pdata->cp_qcnt_hi)) {
netif_stop_queue(ndev);
@@ -276,7 +270,7 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- iowrite32(1, tx_ring->cmd);
+ pdata->ring_ops->wr_cmd(tx_ring, 1);
skb_tx_timestamp(skb);
tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
@@ -389,11 +383,11 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
} while (--budget);
if (likely(count)) {
- iowrite32(-count, ring->cmd);
+ pdata->ring_ops->wr_cmd(ring, -count);
ring->head = head;
if (netif_queue_stopped(ring->ndev)) {
- if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
+ if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
netif_wake_queue(ring->ndev);
}
}
@@ -510,6 +504,7 @@ static int xgene_enet_open(struct net_device *ndev)
else
schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
+ netif_carrier_off(ndev);
netif_start_queue(ndev);
return ret;
@@ -545,7 +540,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
pdata = netdev_priv(ring->ndev);
dev = ndev_to_dev(ring->ndev);
- xgene_enet_clear_ring(ring);
+ pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
@@ -598,15 +593,17 @@ static int xgene_enet_get_ring_size(struct device *dev,
static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
+ struct xgene_enet_pdata *pdata;
struct device *dev;
if (!ring)
return;
dev = ndev_to_dev(ring->ndev);
+ pdata = netdev_priv(ring->ndev);
if (ring->desc_addr) {
- xgene_enet_clear_ring(ring);
+ pdata->ring_ops->clear(ring);
dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
devm_kfree(dev, ring);
@@ -637,6 +634,25 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
}
}
+static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ if ((pdata->enet_id == XGENE_ENET2) &&
+ (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
+ return true;
+ }
+
+ return false;
+}
+
+static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
+
+ return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
+}
+
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
struct net_device *ndev, u32 ring_num,
enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
@@ -668,9 +684,20 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
}
ring->size = size;
- ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
+ if (is_irq_mbox_required(pdata, ring)) {
+ ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
+ &ring->irq_mbox_dma, GFP_KERNEL);
+ if (!ring->irq_mbox_addr) {
+ dma_free_coherent(dev, size, ring->desc_addr,
+ ring->dma);
+ devm_kfree(dev, ring);
+ return NULL;
+ }
+ }
+
+ ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
- ring = xgene_enet_setup_ring(ring);
+ ring = pdata->ring_ops->setup(ring);
netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
ring->num, ring->size, ring->id, ring->slots);
@@ -682,12 +709,34 @@ static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
return (owner << 6) | (bufnum & GENMASK(5, 0));
}
+static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
+{
+ enum xgene_ring_owner owner;
+
+ if (p->enet_id == XGENE_ENET1) {
+ switch (p->phy_mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ owner = RING_OWNER_ETH0;
+ break;
+ default:
+ owner = (!p->port_id) ? RING_OWNER_ETH0 :
+ RING_OWNER_ETH1;
+ break;
+ }
+ } else {
+ owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
+ }
+
+ return owner;
+}
+
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
+ enum xgene_ring_owner owner;
u8 cpu_bufnum = pdata->cpu_bufnum;
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
@@ -696,6 +745,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
int ret;
/* allocate rx descriptor ring */
+ owner = xgene_derive_ring_owner(pdata);
ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
@@ -705,7 +755,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
}
/* allocate buffer pool for receiving packets */
- ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_2KB, ring_id);
if (!buf_pool) {
@@ -734,7 +785,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
pdata->rx_ring = rx_ring;
/* allocate tx descriptor ring */
- ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB, ring_id);
if (!tx_ring) {
@@ -824,14 +876,21 @@ static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata)
int ret;
ret = device_property_read_u32(dev, "port-id", &id);
- if (!ret && id > 1) {
- dev_err(dev, "Incorrect port-id specified\n");
- return -ENODEV;
- }
- pdata->port_id = id;
+ switch (ret) {
+ case -EINVAL:
+ pdata->port_id = 0;
+ ret = 0;
+ break;
+ case 0:
+ pdata->port_id = id & BIT(0);
+ break;
+ default:
+ dev_err(dev, "Incorrect port-id specified: errno: %d\n", ret);
+ break;
+ }
- return 0;
+ return ret;
}
static int xgene_get_mac_address(struct device *dev,
@@ -876,6 +935,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
struct device *dev;
struct resource *res;
void __iomem *base_addr;
+ u32 offset;
int ret;
pdev = pdata->pdev;
@@ -962,14 +1022,20 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
pdata->clk = NULL;
}
- base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+ base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+ else
+ base_addr = pdata->base_addr;
pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
- pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+ offset = (pdata->enet_id == XGENE_ENET1) ?
+ BLOCK_ETH_MAC_CSR_OFFSET :
+ X2_BLOCK_ETH_MAC_CSR_OFFSET;
+ pdata->mcx_mac_csr_addr = base_addr + offset;
} else {
pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
@@ -1034,23 +1100,44 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
break;
}
- switch (pdata->port_id) {
- case 0:
- pdata->cpu_bufnum = START_CPU_BUFNUM_0;
- pdata->eth_bufnum = START_ETH_BUFNUM_0;
- pdata->bp_bufnum = START_BP_BUFNUM_0;
- pdata->ring_num = START_RING_NUM_0;
- break;
- case 1:
- pdata->cpu_bufnum = START_CPU_BUFNUM_1;
- pdata->eth_bufnum = START_ETH_BUFNUM_1;
- pdata->bp_bufnum = START_BP_BUFNUM_1;
- pdata->ring_num = START_RING_NUM_1;
- break;
- default:
- break;
+ if (pdata->enet_id == XGENE_ENET1) {
+ switch (pdata->port_id) {
+ case 0:
+ pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+ pdata->eth_bufnum = START_ETH_BUFNUM_0;
+ pdata->bp_bufnum = START_BP_BUFNUM_0;
+ pdata->ring_num = START_RING_NUM_0;
+ break;
+ case 1:
+ pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = START_BP_BUFNUM_1;
+ pdata->ring_num = START_RING_NUM_1;
+ break;
+ default:
+ break;
+ }
+ pdata->ring_ops = &xgene_ring1_ops;
+ } else {
+ switch (pdata->port_id) {
+ case 0:
+ pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+ pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+ pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+ pdata->ring_num = X2_START_RING_NUM_0;
+ break;
+ case 1:
+ pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
+ pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
+ pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
+ pdata->ring_num = X2_START_RING_NUM_1;
+ break;
+ default:
+ break;
+ }
+ pdata->rm = RM0;
+ pdata->ring_ops = &xgene_ring2_ops;
}
-
}
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
@@ -1086,6 +1173,9 @@ static int xgene_enet_probe(struct platform_device *pdev)
struct xgene_enet_pdata *pdata;
struct device *dev = &pdev->dev;
struct xgene_mac_ops *mac_ops;
+#ifdef CONFIG_OF
+ const struct of_device_id *of_id;
+#endif
int ret;
ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
@@ -1104,6 +1194,17 @@ static int xgene_enet_probe(struct platform_device *pdev)
NETIF_F_GSO |
NETIF_F_GRO;
+#ifdef CONFIG_OF
+ of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
+ if (of_id) {
+ pdata->enet_id = (enum xgene_enet_id)of_id->data;
+ if (!pdata->enet_id) {
+ free_netdev(ndev);
+ return -ENODEV;
+ }
+ }
+#endif
+
ret = xgene_enet_get_resources(pdata);
if (ret)
goto err;
@@ -1175,9 +1276,11 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
- {.compatible = "apm,xgene-enet",},
- {.compatible = "apm,xgene1-sgenet",},
- {.compatible = "apm,xgene1-xgenet",},
+ {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+ {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+ {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
{},
};
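
The net effect of the main.c changes is that every ring access now goes through pdata->ring_ops, chosen once at probe time, so the hot paths never branch on the SoC generation. A minimal user-space sketch of that dispatch pattern; the ops-struct shape mirrors xgene_ring_ops, everything else is invented for illustration:

#include <stdio.h>

struct ring;                         /* stand-in for xgene_enet_desc_ring */

struct ring_ops {                    /* mirrors struct xgene_ring_ops */
	void (*wr_cmd)(struct ring *ring, int count);
	unsigned int (*len)(struct ring *ring);
};

static void ring1_wr_cmd(struct ring *ring, int count)
{
	printf("ring1: iowrite32(%d, ring->cmd)\n", count);
}

static unsigned int ring1_len(struct ring *ring)
{
	return 0;                    /* would read the ring state here */
}

static const struct ring_ops ring1_ops = {
	.wr_cmd = ring1_wr_cmd,
	.len    = ring1_len,
};

int main(void)
{
	const struct ring_ops *ops = &ring1_ops;  /* picked once at probe */

	ops->wr_cmd(NULL, 16);       /* call sites never test the generation */
	printf("%u messages queued\n", ops->len(NULL));
	return 0;
}
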
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 8f3d232b09bc..1c85fc87703a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -35,6 +35,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
#define XGENE_DRV_VERSION "v1.0"
#define XGENE_ENET_MAX_MTU 1536
@@ -51,12 +52,26 @@
#define START_BP_BUFNUM_1 0x2A
#define START_RING_NUM_1 264
+#define X2_START_CPU_BUFNUM_0 0
+#define X2_START_ETH_BUFNUM_0 0
+#define X2_START_BP_BUFNUM_0 0x20
+#define X2_START_RING_NUM_0 0
+#define X2_START_CPU_BUFNUM_1 0xc
+#define X2_START_ETH_BUFNUM_1 0
+#define X2_START_BP_BUFNUM_1 0x20
+#define X2_START_RING_NUM_1 256
+
#define IRQ_ID_SIZE 16
#define XGENE_MAX_TXC_RINGS 1
#define PHY_POLL_LINK_ON (10 * HZ)
#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
+enum xgene_enet_id {
+ XGENE_ENET1 = 1,
+ XGENE_ENET2
+};
+
/* software context of a descriptor ring */
struct xgene_enet_desc_ring {
struct net_device *ndev;
@@ -68,10 +83,12 @@ struct xgene_enet_desc_ring {
u16 irq;
char irq_name[IRQ_ID_SIZE];
u32 size;
- u32 state[NUM_RING_CONFIG];
+ u32 state[X2_NUM_RING_CONFIG];
void __iomem *cmd_base;
void __iomem *cmd;
dma_addr_t dma;
+ dma_addr_t irq_mbox_dma;
+ void *irq_mbox_addr;
u16 dst_ring_num;
u8 nbufpool;
struct sk_buff *(*rx_skb);
@@ -105,6 +122,15 @@ struct xgene_port_ops {
void (*shutdown)(struct xgene_enet_pdata *pdata);
};
+struct xgene_ring_ops {
+ u8 num_ring_config;
+ u8 num_ring_id_shift;
+ struct xgene_enet_desc_ring * (*setup)(struct xgene_enet_desc_ring *);
+ void (*clear)(struct xgene_enet_desc_ring *);
+ void (*wr_cmd)(struct xgene_enet_desc_ring *, int);
+ u32 (*len)(struct xgene_enet_desc_ring *);
+};
+
/* ethernet private data */
struct xgene_enet_pdata {
struct net_device *ndev;
@@ -113,6 +139,7 @@ struct xgene_enet_pdata {
int phy_speed;
struct clk *clk;
struct platform_device *pdev;
+ enum xgene_enet_id enet_id;
struct xgene_enet_desc_ring *tx_ring;
struct xgene_enet_desc_ring *rx_ring;
char *dev_name;
@@ -136,6 +163,7 @@ struct xgene_enet_pdata {
struct rtnl_link_stats64 stats;
struct xgene_mac_ops *mac_ops;
struct xgene_port_ops *port_ops;
+ struct xgene_ring_ops *ring_ops;
struct delayed_work link_work;
u32 port_id;
u8 cpu_bufnum;
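
xgene_enet_get_ring_id() in main.c packs the ring owner above a 6-bit buffer number (GENMASK(5, 0) is 0x3f). A stand-alone check of that packing, using sample owner values rather than the driver's enum constants:

#include <assert.h>
#include <stdint.h>

/* Same packing as xgene_enet_get_ring_id(): bufnum in bits 5:0,
 * owner in the bits above; owner values here are samples only.
 */
static uint16_t get_ring_id(uint16_t owner, uint8_t bufnum)
{
	return (owner << 6) | (bufnum & 0x3f);
}

int main(void)
{
	assert(get_ring_id(0, 0x2a) == 0x2a);
	assert(get_ring_id(3, 0x2a) == ((3 << 6) | 0x2a));
	assert(get_ring_id(1, 0xff) == ((1 << 6) | 0x3f)); /* bufnum truncated */
	return 0;
}
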
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
new file mode 100644
index 000000000000..0b6896bb351e
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -0,0 +1,200 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
+
+static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+ u64 addr = ring->dma;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+ ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
+ ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
+ }
+ ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1);
+
+ addr >>= 8;
+ ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
+
+ addr >>= 27;
+ ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
+ | ACCEPTLERR
+ | SET_VAL(RINGADDRH, addr);
+ ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1);
+ ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM);
+}
+
+static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+ bool is_bufpool;
+ u32 val;
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+ val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
+ ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val);
+ if (is_bufpool)
+ ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE);
+}
+
+static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
+{
+ u32 *ring_cfg = ring->state;
+
+ ring_cfg[3] |= RECOMBBUF;
+ ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7);
+}
+
+static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
+ u32 offset, u32 data)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+ iowrite32(data, pdata->ring_csr_addr + offset);
+}
+
+static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+ int i;
+
+ xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
+ for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
+ xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
+ ring->state[i]);
+ }
+}
+
+static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ memset(ring->state, 0, sizeof(ring->state));
+ xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
+{
+ enum xgene_ring_owner owner;
+
+ xgene_enet_ring_set_type(ring);
+
+ owner = xgene_enet_ring_owner(ring->id);
+ if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)
+ xgene_enet_ring_set_recombbuf(ring);
+
+ xgene_enet_ring_init(ring);
+ xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
+{
+ u32 ring_id_val, ring_id_buf;
+ bool is_bufpool;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
+ return;
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+
+ ring_id_val = ring->id & GENMASK(9, 0);
+ ring_id_val |= OVERWRITE;
+
+ ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
+ ring_id_buf |= PREFETCH_BUF_EN;
+ if (is_bufpool)
+ ring_id_buf |= IS_BUFFER_POOL;
+
+ xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
+ xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
+}
+
+static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
+{
+ u32 ring_id;
+
+ ring_id = ring->id | OVERWRITE;
+ xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
+ xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
+}
+
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+ struct xgene_enet_desc_ring *ring)
+{
+ bool is_bufpool;
+ u32 addr, i;
+
+ xgene_enet_clr_ring_state(ring);
+ xgene_enet_set_ring_state(ring);
+ xgene_enet_set_ring_id(ring);
+
+ ring->slots = xgene_enet_get_numslots(ring->id, ring->size);
+
+ is_bufpool = xgene_enet_is_bufpool(ring->id);
+ if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
+ return ring;
+
+ addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
+ xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);
+
+ for (i = 0; i < ring->slots; i++)
+ xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
+
+ return ring;
+}
+
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+{
+ xgene_enet_clr_desc_ring_id(ring);
+ xgene_enet_clr_ring_state(ring);
+}
+
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+ u32 data = 0;
+
+ if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+ data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
+ INTR_CLEAR;
+ }
+ data |= (count & GENMASK(16, 0));
+
+ iowrite32(data, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+ u32 __iomem *cmd_base = ring->cmd_base;
+ u32 ring_state, num_msgs;
+
+ ring_state = ioread32(&cmd_base[1]);
+ num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);
+
+ return num_msgs;
+}
+
+struct xgene_ring_ops xgene_ring2_ops = {
+ .num_ring_config = X2_NUM_RING_CONFIG,
+ .num_ring_id_shift = 13,
+ .setup = xgene_enet_setup_ring,
+ .clear = xgene_enet_clear_ring,
+ .wr_cmd = xgene_enet_wr_cmd,
+ .len = xgene_enet_ring_len,
+};
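
xgene_enet_ring_len() above extracts X2_NUMMSGSINQ from word 1 of the ring command area; the field's POS/LEN pair (0/17) is defined in the header that follows. Assuming GET_VAL follows the driver's usual shift-and-mask convention, the extraction is equivalent to this stand-alone sketch:

#include <assert.h>
#include <stdint.h>

#define X2_NUMMSGSINQ_POS 0     /* from xgene_enet_ring2.h */
#define X2_NUMMSGSINQ_LEN 17

/* Assumes GET_VAL(f, v) == ((v) >> f##_POS) & ((1u << f##_LEN) - 1),
 * the usual convention in this driver; the macro itself is not shown
 * in the patch.
 */
static uint32_t num_msgs_in_q(uint32_t ring_state)
{
	uint32_t mask = (1u << X2_NUMMSGSINQ_LEN) - 1;  /* GENMASK(16, 0) */

	return (ring_state >> X2_NUMMSGSINQ_POS) & mask;
}

int main(void)
{
	assert(num_msgs_in_q(0xfffe0000u) == 0);       /* bits 17+ ignored */
	assert(num_msgs_in_q(0x0001ffffu) == 0x1ffff); /* full 17-bit count */
	return 0;
}
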
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
new file mode 100644
index 000000000000..8b235db23c42
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
@@ -0,0 +1,49 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_RING2_H__
+#define __XGENE_ENET_RING2_H__
+
+#include "xgene_enet_main.h"
+
+#define X2_NUM_RING_CONFIG 6
+
+#define INTR_MBOX_SIZE 1024
+#define CSR_VMID0_INTR_MBOX 0x0270
+#define INTR_CLEAR BIT(23)
+
+#define X2_MSG_AM_POS 10
+#define X2_QBASE_AM_POS 11
+#define X2_INTLINE_POS 24
+#define X2_INTLINE_LEN 5
+#define X2_CFGCRID_POS 29
+#define X2_CFGCRID_LEN 3
+#define X2_SELTHRSH_POS 7
+#define X2_SELTHRSH_LEN 3
+#define X2_RINGTYPE_POS 23
+#define X2_RINGTYPE_LEN 2
+#define X2_DEQINTEN_POS 29
+#define X2_RECOMTIMEOUT_POS 0
+#define X2_RECOMTIMEOUT_LEN 7
+#define X2_NUMMSGSINQ_POS 0
+#define X2_NUMMSGSINQ_LEN 17
+
+extern struct xgene_ring_ops xgene_ring2_ops;
+
+#endif /* __XGENE_ENET_RING2_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index f27fb6f2a93b..ff240b3cb2b8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -21,6 +21,7 @@
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
+#include "xgene_enet_xgmac.h"
static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
@@ -39,6 +40,14 @@ static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
iowrite32(val, p->eth_diag_csr_addr + offset);
}
+static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
+ u32 offset, u32 val)
+{
+ void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+ iowrite32(val, addr);
+}
+
static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
u32 wr_addr, u32 wr_data)
{
@@ -140,8 +149,9 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
- u32 val = 0xffffffff;
+ u32 val;
+ val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
}
@@ -227,6 +237,8 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
u32 data, loop = 10;
u32 offset = p->port_id * 4;
+ u32 enet_spare_cfg_reg, rsif_config_reg;
+ u32 cfg_bypass_reg, rx_dv_gate_reg;
xgene_sgmac_reset(p);
@@ -239,7 +251,7 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
SGMII_STATUS_ADDR >> 2);
if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
break;
- usleep_range(10, 20);
+ usleep_range(1000, 2000);
}
if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
netdev_err(p->ndev, "Auto-negotiation failed\n");
@@ -249,33 +261,38 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);
- data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
+ if (p->enet_id == XGENE_ENET1) {
+ enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
+ rsif_config_reg = RSIF_CONFIG_REG_ADDR;
+ cfg_bypass_reg = CFG_BYPASS_ADDR;
+ rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR;
+ } else {
+ enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
+ rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
+ cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
+ rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
+ }
+
+ data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
data |= MPA_IDLE_WITH_QMI_EMPTY;
- xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);
+ xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
xgene_sgmac_set_mac_addr(p);
- data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
- data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
- xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);
-
/* Adjust MDC clock frequency */
data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
MGMT_CLOCK_SEL_SET(&data, 7);
xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);
/* Enable drop if bufpool not available */
- data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
+ data = xgene_enet_rd_csr(p, rsif_config_reg);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
- xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);
-
- /* Rtype should be copied from FP */
- xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);
+ xgene_enet_wr_csr(p, rsif_config_reg, data);
/* Bypass traffic gating */
- xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0);
- xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
- xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0);
+ xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
+ xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
+ xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0);
}
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -331,14 +348,23 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
u32 dst_ring_num, u16 bufpool_id)
{
u32 data, fpsel;
+ u32 cle_bypass_reg0, cle_bypass_reg1;
u32 offset = p->port_id * MAC_OFFSET;
+ if (p->enet_id == XGENE_ENET1) {
+ cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
+ cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
+ } else {
+ cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
+ cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
+ }
+
data = CFG_CLE_BYPASS_EN0;
- xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data);
+ xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
- xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data);
+ xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
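
The sgmac changes repeat one pattern: select the register offsets by pdata->enet_id up front, then run the generation-neutral read-modify-write. A compressed sketch of that select-then-use shape; the offsets are placeholders, not the driver's real values:

#include <stdio.h>
#include <stdint.h>

enum enet_id { ENET1 = 1, ENET2 };

/* Placeholder offsets standing in for CFG_BYPASS_ADDR and
 * XG_CFG_BYPASS_ADDR; only the shape of the selection matters here.
 */
static uint32_t cfg_bypass_reg(enum enet_id id)
{
	return (id == ENET1) ? 0x100 : 0x200;
}

int main(void)
{
	printf("ENET1 bypass reg %#x, ENET2 bypass reg %#x\n",
	       cfg_bypass_reg(ENET1), cfg_bypass_reg(ENET2));
	return 0;
}
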
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index a18a9d1f1143..27ba2fe3fca6 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -122,7 +122,6 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
return true;
}
-
static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
u32 rd_addr, u32 *rd_data)
{
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 5a5296a6d1df..bf0a99435737 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -21,9 +21,28 @@
#ifndef __XGENE_ENET_XGMAC_H__
#define __XGENE_ENET_XGMAC_H__
+#define X2_BLOCK_ETH_MAC_CSR_OFFSET 0x3000
#define BLOCK_AXG_MAC_OFFSET 0x0800
#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000
+#define XGENET_CONFIG_REG_ADDR 0x20
+#define XGENET_SRST_ADDR 0x00
+#define XGENET_CLKEN_ADDR 0x08
+
+#define CSR_CLK BIT(0)
+#define XGENET_CLK BIT(1)
+#define PCS_CLK BIT(3)
+#define AN_REF_CLK BIT(4)
+#define AN_CLK BIT(5)
+#define AD_CLK BIT(6)
+
+#define CSR_RST BIT(0)
+#define XGENET_RST BIT(1)
+#define PCS_RST BIT(3)
+#define AN_REF_RST BIT(4)
+#define AN_RST BIT(5)
+#define AD_RST BIT(6)
+
#define AXGMAC_CONFIG_0 0x0000
#define AXGMAC_CONFIG_1 0x0004
#define HSTMACRST BIT(31)
@@ -38,6 +57,7 @@
#define HSTMACADR_MSW_ADDR 0x0014
#define HSTMAXFRAME_LENGTH_ADDR 0x0020
+#define XG_MCX_RX_DV_GATE_REG_0_ADDR 0x0004
#define XG_RSIF_CONFIG_REG_ADDR 0x00a0
#define XCLE_BYPASS_REG0_ADDR 0x0160
#define XCLE_BYPASS_REG1_ADDR 0x0164
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 3e9c3fc7591b..65d88d7c5581 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -1,6 +1,8 @@
#ifndef _B44_H
#define _B44_H
+#include <linux/brcmphy.h>
+
/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */
#define B44_DEVCTRL 0x0000UL /* Device Control */
#define DEVCTRL_MPM 0x00000040 /* Magic Packet PME Enable (B0 only) */
@@ -281,8 +283,10 @@ struct ring_info {
};
#define B44_MCAST_TABLE_SIZE 32
-#define B44_PHY_ADDR_NO_LOCAL_PHY 30 /* no local phy regs */
-#define B44_PHY_ADDR_NO_PHY 31 /* no phy present at all */
+/* no local PHY regs, e.g. Broadcom switches' pseudo-PHY */
+#define B44_PHY_ADDR_NO_LOCAL_PHY BRCM_PSEUDO_PHY_ADDR
+/* no phy present at all */
+#define B44_PHY_ADDR_NO_PHY 31
#define B44_MDC_RATIO 5000000
#define B44_STAT_REG_DECLARE \
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 783543ad1fcf..909ad7a0d480 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -456,6 +456,67 @@ static int bcm_sysport_set_wol(struct net_device *dev,
return 0;
}
+static int bcm_sysport_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
+
+ ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
+ ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
+
+ reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+
+ ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
+ ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
+
+ return 0;
+}
+
+static int bcm_sysport_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+
+ /* The base system clock is 125 MHz; the DMA timeout is this reference
+ * clock divided by 1024, which yields roughly 8.192 us. Our maximum
+ * value has to fit in RING_TIMEOUT_MASK (16 bits).
+ */
+ if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
+ ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
+ ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
+ ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
+ (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
+ return -EINVAL;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
+ reg &= ~(RING_INTR_THRESH_MASK |
+ RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
+ reg |= ec->tx_max_coalesced_frames;
+ reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
+ RING_TIMEOUT_SHIFT;
+ tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
+ }
+
+ reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+ reg &= ~(RDMA_INTR_THRESH_MASK |
+ RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
+ reg |= ec->rx_max_coalesced_frames;
+ reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
+ RDMA_TIMEOUT_SHIFT;
+ rdma_writel(priv, reg, RDMA_MBDONE_INTR);
+
+ return 0;
+}
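
The comment above gives the clock math: a 125 MHz reference divided by 1024 yields one timeout tick per 8.192 us, and the driver rounds with DIV_ROUND_UP when converting a user-supplied microsecond value. A stand-alone check of both directions of that conversion:

#include <assert.h>
#include <stdint.h>

static uint32_t usecs_to_ticks(uint32_t usecs)
{
	return (usecs * 1000u + 8191u) / 8192u; /* DIV_ROUND_UP(us * 1000, 8192) */
}

static uint32_t ticks_to_usecs(uint32_t ticks)
{
	return ticks * 8192u / 1000u;           /* what get_coalesce reports */
}

int main(void)
{
	assert(usecs_to_ticks(0) == 0);   /* zero disables the timeout */
	assert(usecs_to_ticks(8) == 1);   /* 8 us rounds up to one tick */
	assert(usecs_to_ticks(9) == 2);   /* anything past one tick needs two */
	assert(ticks_to_usecs(1) == 8);   /* one tick reads back as 8 us */
	return 0;
}
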
+
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
dev_kfree_skb_any(cb->skb);
@@ -463,67 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
dma_unmap_addr_set(cb, dma_addr, 0);
}
-static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
- struct bcm_sysport_cb *cb)
+static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb)
{
struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev;
+ struct sk_buff *skb, *rx_skb;
dma_addr_t mapping;
- int ret;
- cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
- if (!cb->skb) {
+ /* Allocate a new SKB for a new packet */
+ skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+ if (!skb) {
+ priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
- return -ENOMEM;
+ return NULL;
}
- mapping = dma_map_single(kdev, cb->skb->data,
+ mapping = dma_map_single(kdev, skb->data,
RX_BUF_LENGTH, DMA_FROM_DEVICE);
- ret = dma_mapping_error(kdev, mapping);
- if (ret) {
+ if (dma_mapping_error(kdev, mapping)) {
priv->mib.rx_dma_failed++;
- bcm_sysport_free_cb(cb);
+ dev_kfree_skb_any(skb);
netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
- return ret;
+ return NULL;
}
- dma_unmap_addr_set(cb, dma_addr, mapping);
- dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+ /* Grab the current SKB on the ring */
+ rx_skb = cb->skb;
+ if (likely(rx_skb))
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
- priv->rx_bd_assign_index++;
- priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
- priv->rx_bd_assign_ptr = priv->rx_bds +
- (priv->rx_bd_assign_index * DESC_SIZE);
+ /* Put the new SKB on the ring */
+ cb->skb = skb;
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_desc_set_addr(priv, cb->bd_addr, mapping);
netif_dbg(priv, rx_status, ndev, "RX refill\n");
- return 0;
+ /* Return the current SKB to the caller */
+ return rx_skb;
}
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
struct bcm_sysport_cb *cb;
- int ret = 0;
+ struct sk_buff *skb;
unsigned int i;
for (i = 0; i < priv->num_rx_bds; i++) {
- cb = &priv->rx_cbs[priv->rx_bd_assign_index];
- if (cb->skb)
- continue;
-
- ret = bcm_sysport_rx_refill(priv, cb);
- if (ret)
- break;
+ cb = &priv->rx_cbs[i];
+ skb = bcm_sysport_rx_refill(priv, cb);
+ if (skb)
+ dev_kfree_skb(skb);
+ if (!cb->skb)
+ return -ENOMEM;
}
- return ret;
+ return 0;
}
/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int budget)
{
- struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev;
unsigned int processed = 0, to_process;
struct bcm_sysport_cb *cb;
@@ -531,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int p_index;
u16 len, status;
struct bcm_rsb *rsb;
- int ret;
/* Determine how much we should process since the last call */
p_index = rdma_readl(priv, RDMA_PROD_INDEX);
@@ -549,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
while ((processed < to_process) && (processed < budget)) {
cb = &priv->rx_cbs[priv->rx_read_ptr];
- skb = cb->skb;
+ skb = bcm_sysport_rx_refill(priv, cb);
- processed++;
- priv->rx_read_ptr++;
-
- if (priv->rx_read_ptr == priv->num_rx_bds)
- priv->rx_read_ptr = 0;
/* We do not have a backing SKB, so we do not have a corresponding
* DMA mapping for this incoming packet since
@@ -566,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
netif_err(priv, rx_err, ndev, "out of memory!\n");
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- goto refill;
+ goto next;
}
- dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
- RX_BUF_LENGTH, DMA_FROM_DEVICE);
-
/* Extract the Receive Status Block prepended */
rsb = (struct bcm_rsb *)skb->data;
len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
@@ -583,12 +638,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
p_index, priv->rx_c_index, priv->rx_read_ptr,
len, status);
+ if (unlikely(len > RX_BUF_LENGTH)) {
+ netif_err(priv, rx_status, ndev, "oversized packet\n");
+ ndev->stats.rx_length_errors++;
+ ndev->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
netif_err(priv, rx_status, ndev, "fragmented packet!\n");
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- bcm_sysport_free_cb(cb);
- goto refill;
+ dev_kfree_skb_any(skb);
+ goto next;
}
if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
@@ -597,8 +660,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_over_errors++;
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- bcm_sysport_free_cb(cb);
- goto refill;
+ dev_kfree_skb_any(skb);
+ goto next;
}
skb_put(skb, len);
@@ -625,10 +688,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_bytes += len;
napi_gro_receive(&priv->napi, skb);
-refill:
- ret = bcm_sysport_rx_refill(priv, cb);
- if (ret)
- priv->mib.alloc_rx_buff_failed++;
+next:
+ processed++;
+ priv->rx_read_ptr++;
+
+ if (priv->rx_read_ptr == priv->num_rx_bds)
+ priv->rx_read_ptr = 0;
}
return processed;
@@ -1269,14 +1334,14 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
+ struct bcm_sysport_cb *cb;
u32 reg;
int ret;
+ int i;
/* Initialize SW view of the RX ring */
priv->num_rx_bds = NUM_RX_DESC;
priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
- priv->rx_bd_assign_ptr = priv->rx_bds;
- priv->rx_bd_assign_index = 0;
priv->rx_c_index = 0;
priv->rx_read_ptr = 0;
priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
@@ -1286,6 +1351,11 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
return -ENOMEM;
}
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = priv->rx_cbs + i;
+ cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
+ }
+
ret = bcm_sysport_alloc_rx_bufs(priv);
if (ret) {
netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
@@ -1641,6 +1711,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
.get_sset_count = bcm_sysport_get_sset_count,
.get_wol = bcm_sysport_get_wol,
.set_wol = bcm_sysport_set_wol,
+ .get_coalesce = bcm_sysport_get_coalesce,
+ .set_coalesce = bcm_sysport_set_coalesce,
};
static const struct net_device_ops bcm_sysport_netdev_ops = {
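
The reworked bcm_sysport_rx_refill() swaps a freshly mapped buffer into the ring slot and returns the old one, so an allocation failure leaves the previous buffer in place instead of emptying the slot. A user-space model of that swap, with plain heap buffers standing in for skbs and no DMA mapping:

#include <stdio.h>
#include <stdlib.h>

struct slot { void *buf; };          /* models bcm_sysport_cb */

static void *refill(struct slot *cb, size_t len)
{
	void *new_buf = malloc(len); /* netdev_alloc_skb() in the driver */
	void *old_buf;

	if (!new_buf)
		return NULL;         /* old buffer stays on the ring */

	old_buf = cb->buf;           /* grab the current buffer ... */
	cb->buf = new_buf;           /* ... and park the new one */
	return old_buf;              /* caller consumes or frees this */
}

int main(void)
{
	struct slot cb = { .buf = malloc(32) };
	void *rx = refill(&cb, 32);

	printf("received %p, ring slot still holds %p\n", rx, cb.buf);
	free(rx);
	free(cb.buf);
	return 0;
}
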
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index e2c043eabbf3..f28bf545d7f4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -292,7 +292,7 @@ struct bcm_rsb {
#define RDMA_END_ADDR_LO 0x102c
#define RDMA_MBDONE_INTR 0x1030
-#define RDMA_INTR_THRESH_MASK 0xff
+#define RDMA_INTR_THRESH_MASK 0x1ff
#define RDMA_TIMEOUT_SHIFT 16
#define RDMA_TIMEOUT_MASK 0xffff
@@ -663,8 +663,6 @@ struct bcm_sysport_priv {
/* Receive queue */
void __iomem *rx_bds;
- void __iomem *rx_bd_assign_ptr;
- unsigned int rx_bd_assign_index;
struct bcm_sysport_cb *rx_cbs;
unsigned int num_rx_bds;
unsigned int rx_read_ptr;
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index db27febbb215..4fbb093e0d84 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -13,6 +13,7 @@
dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
#include <linux/bcma/bcma.h>
+#include <linux/brcmphy.h>
#include <linux/netdevice.h>
#define BGMAC_DEV_CTL 0x000
@@ -349,7 +350,7 @@
#define BGMAC_DESC_CTL0_SOF 0x80000000 /* Start of frame */
#define BGMAC_DESC_CTL1_LEN 0x00001FFF
-#define BGMAC_PHY_NOREGS 0x1E
+#define BGMAC_PHY_NOREGS BRCM_PSEUDO_PHY_ADDR
#define BGMAC_PHY_MASK 0x1F
#define BGMAC_MAX_TX_RINGS 4
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 1f82a04ce01a..7a4aaa3c01b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -357,6 +357,7 @@ struct sw_tx_bd {
struct sw_rx_page {
struct page *page;
DEFINE_DMA_UNMAP_ADDR(mapping);
+ unsigned int offset;
};
union db_prod {
@@ -381,9 +382,10 @@ union db_prod {
#define PAGES_PER_SGE_SHIFT 0
#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
-#define SGE_PAGE_SIZE PAGE_SIZE
-#define SGE_PAGE_SHIFT PAGE_SHIFT
-#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGE_SHIFT 12
+#define SGE_PAGE_SIZE (1 << SGE_PAGE_SHIFT)
+#define SGE_PAGE_MASK (~(SGE_PAGE_SIZE - 1))
+#define SGE_PAGE_ALIGN(addr) (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)
#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
SGE_PAGES), 0xffff)
@@ -526,6 +528,12 @@ enum bnx2x_tpa_mode_t {
TPA_MODE_GRO
};
+struct bnx2x_alloc_pool {
+ struct page *page;
+ dma_addr_t dma;
+ unsigned int offset;
+};
+
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
@@ -599,6 +607,8 @@ struct bnx2x_fastpath {
4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[FP_NAME_SIZE];
+
+ struct bnx2x_alloc_pool page_pool;
};
#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
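
With SGE_PAGE_SIZE now pinned to 4 KiB regardless of the host page size, SGE_PAGE_ALIGN rounds an address up to the next 4 KiB boundary. A quick stand-alone check of the new mask arithmetic:

#include <assert.h>

#define SGE_PAGE_SHIFT 12
#define SGE_PAGE_SIZE  (1u << SGE_PAGE_SHIFT)
#define SGE_PAGE_MASK  (~(SGE_PAGE_SIZE - 1))
#define SGE_PAGE_ALIGN(addr) (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)

int main(void)
{
	assert(SGE_PAGE_ALIGN(0u) == 0);
	assert(SGE_PAGE_ALIGN(1u) == 4096);
	assert(SGE_PAGE_ALIGN(4096u) == 4096);  /* already aligned */
	assert(SGE_PAGE_ALIGN(4097u) == 8192);
	return 0;
}
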
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec56a9b65dc3..e2a65334708d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -544,30 +544,49 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
u16 index, gfp_t gfp_mask)
{
- struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+ struct bnx2x_alloc_pool *pool = &fp->page_pool;
dma_addr_t mapping;
- if (unlikely(page == NULL)) {
- BNX2X_ERR("Can't alloc sge\n");
- return -ENOMEM;
- }
+ if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
- mapping = dma_map_page(&bp->pdev->dev, page, 0,
- SGE_PAGES, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- __free_pages(page, PAGES_PER_SGE_SHIFT);
- BNX2X_ERR("Can't map sge\n");
- return -ENOMEM;
+ /* Put the page reference used by the memory pool, since we
+ * won't be using this page as the pool anymore.
+ */
+ if (pool->page)
+ put_page(pool->page);
+
+ pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+ if (unlikely(!pool->page)) {
+ BNX2X_ERR("Can't alloc sge\n");
+ return -ENOMEM;
+ }
+
+ pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev,
+ pool->dma))) {
+ __free_pages(pool->page, PAGES_PER_SGE_SHIFT);
+ pool->page = NULL;
+ BNX2X_ERR("Can't map sge\n");
+ return -ENOMEM;
+ }
+ pool->offset = 0;
}
- sw_buf->page = page;
+ get_page(pool->page);
+ sw_buf->page = pool->page;
+ sw_buf->offset = pool->offset;
+
+ mapping = pool->dma + sw_buf->offset;
dma_unmap_addr_set(sw_buf, mapping, mapping);
sge->addr_hi = cpu_to_le32(U64_HI(mapping));
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+ pool->offset += SGE_PAGE_SIZE;
+
return 0;
}
@@ -629,20 +648,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return err;
}
- /* Unmap the page as we're going to pass it to the stack */
- dma_unmap_page(&bp->pdev->dev,
- dma_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGES, DMA_FROM_DEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(&old_rx_pg, mapping),
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
if (fp->mode == TPA_MODE_LRO)
- skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ skb_fill_page_desc(skb, j, old_rx_pg.page,
+ old_rx_pg.offset, frag_len);
else { /* GRO */
int rem;
int offset = 0;
for (rem = frag_len; rem > 0; rem -= gro_size) {
int len = rem > gro_size ? gro_size : rem;
skb_fill_page_desc(skb, frag_id++,
- old_rx_pg.page, offset, len);
+ old_rx_pg.page,
+ old_rx_pg.offset + offset,
+ len);
if (offset)
get_page(old_rx_pg.page);
offset += len;
@@ -662,7 +683,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
if (fp->rx_frag_size)
- put_page(virt_to_head_page(data));
+ skb_free_frag(data);
else
kfree(data);
}
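
bnx2x_alloc_rx_sge() now maps one whole page and hands out SGE_PAGE_SIZE slices, taking a page reference per slice and dropping the pool's own reference when it rolls to a fresh page. A toy model of the offset and refcount bookkeeping, with an integer flag and counter standing in for struct page and get_page()/put_page():

#include <assert.h>

#define PAGE_SZ 16384u   /* sample 16K host page: 4 slices per page */
#define SGE_SZ   4096u

struct pool {            /* mirrors struct bnx2x_alloc_pool */
	int page;        /* 0 = none; stands in for struct page * */
	unsigned int offset;
	int refs;        /* stands in for the page refcount */
};

static unsigned int alloc_sge(struct pool *p)
{
	unsigned int off;

	if (!p->page || PAGE_SZ - p->offset < SGE_SZ) {
		if (p->page)
			p->refs--;  /* put_page(): pool drops its reference */
		p->page = 1;        /* alloc_pages() + dma_map_page() */
		p->offset = 0;
		p->refs = 1;
	}
	p->refs++;                  /* get_page() on behalf of the SGE */
	off = p->offset;
	p->offset += SGE_SZ;
	return off;
}

int main(void)
{
	struct pool p = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < PAGE_SZ / SGE_SZ; i++)
		assert(alloc_sge(&p) == i * SGE_SZ);
	assert(alloc_sge(&p) == 0);  /* pool rolled over to a fresh page */
	return 0;
}
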
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index d7a71758e876..2b30081ec26d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -804,9 +804,13 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
if (!page)
return;
- dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
- SGE_PAGES, DMA_FROM_DEVICE);
- __free_pages(page, PAGES_PER_SGE_SHIFT);
+ /* Since many fragments can share the same page, make sure to
+ * only unmap and free the page once.
+ */
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ put_page(page);
sw_buf->page = NULL;
sge->addr_hi = 0;
@@ -964,6 +968,25 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
((u8 *)fw_lo)[1] = mac[4];
}
+static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
+ struct bnx2x_alloc_pool *pool)
+{
+ if (!pool->page)
+ return;
+
+ /* Page was not fully fragmented. Unmap unused space */
+ if (pool->offset < PAGE_SIZE) {
+ dma_addr_t dma = pool->dma + pool->offset;
+ int size = PAGE_SIZE - pool->offset;
+
+ dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE);
+ }
+
+ put_page(pool->page);
+
+ pool->page = NULL;
+}
+
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
struct bnx2x_fastpath *fp, int last)
{
@@ -974,6 +997,8 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
for (i = 0; i < last; i++)
bnx2x_free_rx_sge(bp, fp, i);
+
+ bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
}
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 6043734ea613..b43b2cb9b830 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2770,12 +2770,79 @@ static int bcmgenet_close(struct net_device *dev)
return ret;
}
+static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = ring->priv;
+ u32 p_index, c_index, intsts, intmsk;
+ struct netdev_queue *txq;
+ unsigned int free_bds;
+ unsigned long flags;
+ bool txq_stopped;
+
+ if (!netif_msg_tx_err(priv))
+ return;
+
+ txq = netdev_get_tx_queue(priv->dev, ring->queue);
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (ring->index == DESC_INDEX) {
+ intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
+ } else {
+ intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = 1 << ring->index;
+ }
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+ p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
+ txq_stopped = netif_tx_queue_stopped(txq);
+ free_bds = ring->free_bds;
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
+ "TX queue status: %s, interrupts: %s\n"
+ "(sw)free_bds: %d (sw)size: %d\n"
+ "(sw)p_index: %d (hw)p_index: %d\n"
+ "(sw)c_index: %d (hw)c_index: %d\n"
+ "(sw)clean_p: %d (sw)write_p: %d\n"
+ "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
+ ring->index, ring->queue,
+ txq_stopped ? "stopped" : "active",
+ intsts & intmsk ? "enabled" : "disabled",
+ free_bds, ring->size,
+ ring->prod_index, p_index & DMA_P_INDEX_MASK,
+ ring->c_index, c_index & DMA_C_INDEX_MASK,
+ ring->clean_ptr, ring->write_ptr,
+ ring->cb_ptr, ring->end_ptr);
+}
+
static void bcmgenet_timeout(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 int0_enable = 0;
+ u32 int1_enable = 0;
+ unsigned int q;
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
+ bcmgenet_disable_tx_napi(priv);
+
+ for (q = 0; q < priv->hw_params->tx_queues; q++)
+ bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
+ bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
+
+ bcmgenet_tx_reclaim_all(dev);
+
+ for (q = 0; q < priv->hw_params->tx_queues; q++)
+ int1_enable |= (1 << q);
+
+ int0_enable = UMAC_IRQ_TXDMA_DONE;
+
+ /* Re-enable TX interrupts if disabled */
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
+
+ bcmgenet_enable_tx_napi(priv);
+
dev->trans_start = jiffies;
dev->stats.tx_errors++;
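
The timeout handler re-arms one interrupt-enable bit per hardware TX queue plus the default-queue bit; the mask construction is just a loop of shifts, checked stand-alone here with a sample queue count:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t int1_enable = 0;
	unsigned int q, tx_queues = 4; /* sample hw_params->tx_queues value */

	for (q = 0; q < tx_queues; q++)
		int1_enable |= 1u << q;

	assert(int1_enable == 0xf);    /* queues 0..3 re-enabled */
	return 0;
}
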
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 420949cc55aa..6bef04e2f735 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -47,7 +47,12 @@ static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
HZ / 100);
ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
- if (ret & MDIO_READ_FAIL)
+ /* Some broken devices are known not to release the line during
+ * turn-around, e.g. Broadcom BCM53125 external switches, so check for
+ * that condition here and ignore the MDIO controller read failure
+ * indication.
+ */
+ if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (ret & MDIO_READ_FAIL))
return -EIO;
return ret & 0xffff;
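
The bcmmii fix treats MDIO_READ_FAIL as fatal only when the PHY address is absent from phy_ignore_ta_mask. The predicate reduces to a two-input check, sketched stand-alone with an illustrative bit position for MDIO_READ_FAIL:

#include <assert.h>
#include <stdint.h>

#define MDIO_READ_FAIL (1u << 29)   /* bit position illustrative only */

/* Nonzero when the read should be reported as failed: the device is not
 * on the ignore-turn-around list and the controller flagged the read.
 */
static int read_failed(uint32_t ignore_ta_mask, unsigned int phy_id,
		       uint32_t ret)
{
	return !(ignore_ta_mask & (1u << phy_id)) && (ret & MDIO_READ_FAIL);
}

int main(void)
{
	/* broken switch at address 30: failure indication is ignored */
	assert(!read_failed(1u << 30, 30, MDIO_READ_FAIL));
	/* well-behaved PHY at address 1: failure is still reported */
	assert(read_failed(0, 1, MDIO_READ_FAIL));
	return 0;
}
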
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 069952fa5d64..73c934cf6c61 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6618,7 +6618,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
static void tg3_frag_free(bool is_frag, void *data)
{
if (is_frag)
- put_page(virt_to_head_page(data));
+ skb_free_frag(data);
else
kfree(data);
}
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
index 6e10b99733a2..8584abcf5366 100644
--- a/drivers/net/ethernet/brocade/bna/Makefile
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -9,5 +9,3 @@ obj-$(CONFIG_BNA) += bna.o
bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
bna-objs += cna_fwimg.o
-
-EXTRA_CFLAGS := -Idrivers/net/bna
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index cf9f3956f198..95bc8b644a5d 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -282,7 +282,6 @@ bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
cee->ioc = ioc;
bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
- bfa_q_qe_init(&cee->ioc_notify);
bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify);
}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index af25d8e8fae0..1d11d666d408 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -28,19 +28,6 @@
typedef void (*bfa_sm_t)(void *sm, int event);
-/* oc - object class eg. bfa_ioc
- * st - state, eg. reset
- * otype - object type, eg. struct bfa_ioc
- * etype - object type, eg. enum ioc_event
- */
-#define bfa_sm_state_decl(oc, st, otype, etype) \
- static void oc ## _sm_ ## st(otype * fsm, etype event)
-
-#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
-#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
-#define bfa_sm_get_state(_sm) ((_sm)->sm)
-#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
-
/* For converting from state machine function to state encoding. */
struct bfa_sm_table {
bfa_sm_t sm; /*!< state machine function */
@@ -67,7 +54,6 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
-#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
#define bfa_fsm_cmp_state(_fsm, _state) \
((_fsm)->fsm == (bfa_fsm_t)(_state))
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 3bfd9da92630..d152b3fa6c54 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -24,7 +24,6 @@
#include "bfa_defs_status.h"
#include "bfa_defs_mfg_comm.h"
-#define BFA_STRING_32 32
#define BFA_VERSION_LEN 64
/* ---------------------- adapter definitions ------------ */
@@ -55,7 +54,7 @@ struct bfa_adapter_attr {
char optrom_ver[BFA_VERSION_LEN];
char os_type[BFA_ADAPTER_OS_TYPE_LEN];
struct bfa_mfg_vpd vpd;
- struct mac mac;
+ u8 mac[ETH_ALEN];
u8 nports;
u8 max_speed;
@@ -187,8 +186,6 @@ enum {
#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
-#pragma pack(1)
-
/* BFA adapter manufacturing block definition.
*
* All numerical fields are in big-endian format.
@@ -211,7 +208,7 @@ struct bfa_mfg_block {
char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
- mac_t mfg_mac; /* base mac address */
+ u8 mfg_mac[ETH_ALEN]; /* base mac address */
u8 num_mac; /* number of mac addresses */
u8 rsv2;
u32 card_type; /* card type */
@@ -227,9 +224,7 @@ struct bfa_mfg_block {
char initial_mode[8]; /* initial mode: hba/cna/nic */
u8 rsv4[84];
u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
-};
-
-#pragma pack()
+} __packed;
/* ---------------------- pci definitions ------------ */
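
The bna headers swap #pragma pack(1) regions for per-struct __packed annotations; both produce the same unpadded layout, which this stand-alone check demonstrates with the gcc/clang attribute spelling that the kernel's __packed macro expands to:

#include <assert.h>
#include <stdint.h>

struct padded {                  /* natural layout: padding after 'a' */
	uint8_t  a;
	uint32_t b;
};

struct unpadded {                /* same layout as the pragma pack(1) form */
	uint8_t  a;
	uint32_t b;
} __attribute__((packed));       /* what the kernel's __packed expands to */

int main(void)
{
	assert(sizeof(struct unpadded) == 5);
	assert(sizeof(struct padded) > sizeof(struct unpadded));
	return 0;
}
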
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index a37326d44fbb..f048887cbb81 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -109,8 +109,6 @@ union bfa_port_stats_u {
struct bfa_port_eth_stats eth;
};
-#pragma pack(1)
-
#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
#define BFA_CEE_DCBX_MAX_PRIORITY (8)
#define BFA_CEE_DCBX_MAX_PGID (8)
@@ -133,7 +131,7 @@ struct bfa_cee_lldp_str {
u8 len;
u8 rsvd[2];
u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
-};
+} __packed;
/* LLDP parameters */
struct bfa_cee_lldp_cfg {
@@ -145,7 +143,7 @@ struct bfa_cee_lldp_cfg {
struct bfa_cee_lldp_str mgmt_addr;
u16 time_to_live;
u16 enabled_system_cap;
-};
+} __packed;
enum bfa_cee_dcbx_version {
DCBX_PROTOCOL_PRECEE = 1,
@@ -171,7 +169,7 @@ struct bfa_cee_dcbx_cfg {
u8 lls_fcoe; /* FCoE Logical Link Status */
u8 lls_lan; /* LAN Logical Link Status */
u8 rsvd[2];
-};
+} __packed;
/* CEE status */
/* Making this to tri-state for the benefit of port list command */
@@ -188,11 +186,11 @@ struct bfa_cee_attr {
u8 error_reason;
struct bfa_cee_lldp_cfg lldp_remote;
struct bfa_cee_dcbx_cfg dcbx_remote;
- mac_t src_mac;
+ u8 src_mac[ETH_ALEN];
u8 link_speed;
u8 nw_priority;
u8 filler[2];
-};
+} __packed;
/* LLDP/DCBX/CEE Statistics */
struct bfa_cee_stats {
@@ -214,8 +212,6 @@ struct bfa_cee_stats {
u32 cee_status_up; /*!< CEE status up */
u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFA_DEFS_CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 7a45cd0b594d..16090fdf1dd5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -59,8 +59,6 @@ enum {
BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
};
-#pragma pack(1)
-
/* Check if Mezz card */
#define bfa_mfg_is_mezz(type) (( \
(type) == BFA_MFG_TYPE_JAYHAWK || \
@@ -77,7 +75,7 @@ enum {
CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
- CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
+ CB_GPIO_PROTO = BIT(7) /*!< 8G 2port FC prototypes */
};
#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
@@ -148,8 +146,6 @@ struct bfa_mfg_vpd {
u8 len; /*!< vpd data length excluding header */
u8 rsv;
u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFA_DEFS_MFG_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 68f3c13c9ef6..b009fd7dda6a 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -23,14 +23,6 @@
/* IOC local definitions */
-#define bfa_ioc_state_disabled(__sm) \
- (((__sm) == BFI_IOC_UNINIT) || \
- ((__sm) == BFI_IOC_INITING) || \
- ((__sm) == BFI_IOC_HWINIT) || \
- ((__sm) == BFI_IOC_DISABLED) || \
- ((__sm) == BFI_IOC_FAIL) || \
- ((__sm) == BFI_IOC_CFG_DISABLED))
-
/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
@@ -57,12 +49,6 @@
((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
-#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \
- ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
-
-#define bfa_ioc_mbox_cmd_pending(__ioc) \
- (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
- readl((__ioc)->ioc_regs.hfn_mbox_cmd))
static bool bfa_nw_auto_recover = true;
@@ -1105,12 +1091,9 @@ static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
struct bfa_ioc_notify *notify;
- struct list_head *qe;
- list_for_each(qe, &ioc->notify_q) {
- notify = (struct bfa_ioc_notify *)qe;
+ list_for_each_entry(notify, &ioc->notify_q, qe)
notify->cbfn(notify->cbarg, event);
- }
}
static void
@@ -1387,7 +1370,7 @@ static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
- if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
+ if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
return BFI_IOC_IMG_VER_INCOMP;
if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
@@ -1398,7 +1381,7 @@ bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
/* GA takes priority over internal builds of the same patch stream.
* At this point the major, minor, maint and patch numbers are the same.
*/
- if (fwhdr_is_ga(base_fwhdr) == true)
+ if (fwhdr_is_ga(base_fwhdr))
if (fwhdr_is_ga(fwhdr_to_cmp))
return BFI_IOC_IMG_VER_SAME;
else
@@ -1912,10 +1895,8 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
}
void
-bfa_nw_ioc_timeout(void *ioc_arg)
+bfa_nw_ioc_timeout(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
-
bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
@@ -1980,10 +1961,9 @@ bfa_ioc_send_getattr(struct bfa_ioc *ioc)
}
void
-bfa_nw_ioc_hb_check(void *cbarg)
+bfa_nw_ioc_hb_check(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = cbarg;
- u32 hb_count;
+ u32 hb_count;
hb_count = readl(ioc->ioc_regs.heartbeat);
if (ioc->hb_count == hb_count) {
@@ -2177,7 +2157,8 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
/**
* Enqueue command to firmware.
*/
- bfa_q_deq(&mod->cmd_q, &cmd);
+ cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
+ list_del(&cmd->qe);
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
/**
@@ -2198,8 +2179,10 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd *cmd;
- while (!list_empty(&mod->cmd_q))
- bfa_q_deq(&mod->cmd_q, &cmd);
+ while (!list_empty(&mod->cmd_q)) {
+ cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
+ list_del(&cmd->qe);
+ }
}
/**
@@ -2223,7 +2206,7 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
/*
* Hold semaphore to serialize pll init and fwtrc.
*/
- if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
+ if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
return 1;
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
@@ -2278,7 +2261,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
int tlen;
if (ioc->dbg_fwsave_once) {
- ioc->dbg_fwsave_once = 0;
+ ioc->dbg_fwsave_once = false;
if (ioc->dbg_fwsave_len) {
tlen = ioc->dbg_fwsave_len;
bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
@@ -2796,7 +2779,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
ad_attr->prototype = 0;
ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
- ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
+ bfa_nw_ioc_get_mac(ioc, ad_attr->mac);
ad_attr->pcie_gen = ioc_attr->pcie_gen;
ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
@@ -2942,10 +2925,10 @@ bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
return ioc->attr->pwwn;
}
-mac_t
-bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
+void
+bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac)
{
- return ioc->attr->mac;
+ ether_addr_copy(mac, ioc->attr->mac);
}
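
Returning the 6-byte `mac_t` by value is replaced by copying into a caller-supplied buffer. ether_addr_copy() compiles down to two word-sized loads/stores, but it requires both addresses to be 2-byte aligned (plain memcpy has no such constraint). A hypothetical caller, just to show the contract:

    #include <linux/etherdevice.h>

    static void fill_perm_addr(struct net_device *netdev, struct bfa_ioc *ioc)
    {
            u8 mac[ETH_ALEN] __aligned(2);  /* ether_addr_copy needs u16 alignment */

            bfa_nw_ioc_get_mac(ioc, mac);
            ether_addr_copy(netdev->perm_addr, mac);
    }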
/* Firmware failure detected. Start recovery actions. */
@@ -2997,9 +2980,8 @@ bfa_iocpf_stop(struct bfa_ioc *ioc)
}
void
-bfa_nw_iocpf_timeout(void *ioc_arg)
+bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
enum bfa_iocpf_state iocpf_st;
iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
@@ -3011,10 +2993,8 @@ bfa_nw_iocpf_timeout(void *ioc_arg)
}
void
-bfa_nw_iocpf_sem_timeout(void *ioc_arg)
+bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc)
{
- struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
-
bfa_ioc_hw_sem_get(ioc);
}
@@ -3245,7 +3225,6 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
flash->op_busy = 0;
bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
- bfa_q_qe_init(&flash->ioc_notify);
bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}
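
Dropping bfa_q_qe_init() before list_add_tail() is safe because list_add_tail() unconditionally writes both pointers of the node being inserted; a node only needs INIT_LIST_HEAD() if something might inspect it before it has ever been queued. In sketch form, with a hypothetical node type:

    struct notify_node {
            struct list_head qe;
    };

    static void register_node(struct list_head *q, struct notify_node *n)
    {
            /* no INIT_LIST_HEAD(&n->qe) required: list_add_tail()
             * overwrites n->qe.next and n->qe.prev itself */
            list_add_tail(&n->qe, q);
    }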
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index effb7156e7a4..2c0b4c076355 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -232,12 +232,6 @@ struct bfa_ioc_hwif {
#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
#define bfa_ioc_is_default(__ioc) \
(bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc))
-#define bfa_ioc_fetch_stats(__ioc, __stats) \
- (((__stats)->drv_stats) = (__ioc)->stats)
-#define bfa_ioc_clr_stats(__ioc) \
- memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
-#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
-#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
#define bfa_ioc_speed_sup(__ioc) \
BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
#define bfa_ioc_get_nports(__ioc) \
@@ -268,13 +262,6 @@ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
(__ioc)->asic_mode))
-#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \
- if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \
- ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \
-} while (0)
-#define bfa_ioc_ownership_reset(__ioc) \
- ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
-
#define bfa_ioc_lpu_read_stat(__ioc) do { \
if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \
((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \
@@ -309,7 +296,7 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
-mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+void bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac);
void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
@@ -317,10 +304,10 @@ int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
/*
* Timeout APIs
*/
-void bfa_nw_ioc_timeout(void *ioc);
-void bfa_nw_ioc_hb_check(void *ioc);
-void bfa_nw_iocpf_timeout(void *ioc);
-void bfa_nw_iocpf_sem_timeout(void *ioc);
+void bfa_nw_ioc_timeout(struct bfa_ioc *ioc);
+void bfa_nw_ioc_hb_check(struct bfa_ioc *ioc);
+void bfa_nw_iocpf_timeout(struct bfa_ioc *ioc);
+void bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc);
/*
* F/W Image Size & Chunk
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 2e72445dbb4f..4247d8a44573 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -24,7 +24,7 @@
#include "bfa_defs.h"
#define bfa_ioc_ct_sync_pos(__ioc) \
- ((u32) (1 << bfa_ioc_pcifn(__ioc)))
+ ((u32)BIT(bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH 16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
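
BIT(n) from <linux/bitops.h> expands to (1UL << (n)), which reads better for flag definitions and sidesteps the undefined behaviour of the signed `1 << 31` (relevant for BNA_CQ_EF_EOP further down in this patch). The (u32) cast is kept above because BIT() yields an unsigned long. Sketch:

    #include <linux/bitops.h>

    #define SYNC_POS(fn)    ((u32)BIT(fn))  /* one sync bit per PCI function */

    /* BIT(31) == 1UL << 31: well-defined, unlike the signed (1 << 31) */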
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index c07d5b9372f4..9c5bb24e8abb 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -66,8 +66,9 @@ cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
cmdq->offset = 0;
cmdq->bytes_to_copy = 0;
while (!list_empty(&cmdq->pending_q)) {
- bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
- bfa_q_qe_init(&cmdq_ent->qe);
+ cmdq_ent = list_first_entry(&cmdq->pending_q,
+ struct bfa_msgq_cmd_entry, qe);
+ list_del(&cmdq_ent->qe);
call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
}
}
@@ -242,8 +243,8 @@ bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
/* Walk through pending list to see if the command can be posted */
while (!list_empty(&cmdq->pending_q)) {
- cmd =
- (struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
+ cmd = list_first_entry(&cmdq->pending_q,
+ struct bfa_msgq_cmd_entry, qe);
if (ntohs(cmd->msg_hdr->num_entries) <=
BFA_MSGQ_FREE_CNT(cmdq)) {
list_del(&cmd->qe);
@@ -615,7 +616,6 @@ bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
bfa_msgq_rspq_attach(&msgq->rspq, msgq);
bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
- bfa_q_qe_init(&msgq->ioc_notify);
bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 2bcde4042268..81e59ea8b4f2 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -21,8 +21,6 @@
#include "bfa_defs.h"
-#pragma pack(1)
-
/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
@@ -36,10 +34,10 @@ struct bfi_mhdr {
struct {
u8 qid;
u8 fn_lpu; /*!< msg destination */
- } h2i;
+ } __packed h2i;
u16 i2htok; /*!< token in msgs to host */
- } mtag;
-};
+ } __packed mtag;
+} __packed;
#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu))
#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1)
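
`#pragma pack(1)` packs every structure declared between the pragma pair, including anything pulled in by an intervening #include; the per-struct __packed attribute scopes byte-packing to exactly the wire-format structures. Packing is not inherited, which is why each nested struct and union above gets its own __packed. A minimal sketch, not the driver's layout:

    struct wire_hdr {
            u8  op;
            u32 token;      /* sits at offset 1 only because of __packed */
    } __packed;

    /* sizeof(struct wire_hdr) is 5 with __packed, 8 without;
     * checkable with BUILD_BUG_ON() in any init function */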
@@ -75,14 +73,14 @@ union bfi_addr_u {
struct {
u32 addr_lo;
u32 addr_hi;
- } a32;
-};
+ } __packed a32;
+} __packed;
/* Generic DMA addr-len pair. */
struct bfi_alen {
union bfi_addr_u al_addr; /* DMA addr of buffer */
u32 al_len; /* length of buffer */
-};
+} __packed;
/*
* Large Message structure - 128 Bytes size Msgs
@@ -96,7 +94,7 @@ struct bfi_alen {
struct bfi_mbmsg {
struct bfi_mhdr mh;
u32 pl[BFI_MBMSG_SZ];
-};
+} __packed;
/* Supported PCI function class codes (personality) */
enum bfi_pcifn_class {
@@ -184,19 +182,19 @@ enum bfi_ioc_i2h_msgs {
struct bfi_ioc_getattr_req {
struct bfi_mhdr mh;
union bfi_addr_u attr_addr;
-};
+} __packed;
struct bfi_ioc_attr {
u64 mfg_pwwn; /*!< Mfg port wwn */
u64 mfg_nwwn; /*!< Mfg node wwn */
- mac_t mfg_mac; /*!< Mfg mac */
+ u8 mfg_mac[ETH_ALEN]; /*!< Mfg mac */
u8 port_mode; /* enum bfi_port_mode */
u8 rsvd_a;
u64 pwwn;
u64 nwwn;
- mac_t mac; /*!< PBC or Mfg mac */
+ u8 mac[ETH_ALEN]; /*!< PBC or Mfg mac */
u16 rsvd_b;
- mac_t fcoe_mac;
+ u8 fcoe_mac[ETH_ALEN];
u16 rsvd_c;
char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
u8 pcie_gen;
@@ -211,14 +209,14 @@ struct bfi_ioc_attr {
char optrom_version[BFA_VERSION_LEN];
struct bfa_mfg_vpd vpd;
u32 card_type; /*!< card type */
-};
+} __packed;
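
The driver-private `mac_t` typedef gives way to plain `u8 [ETH_ALEN]` arrays throughout, so the generic helpers in <linux/etherdevice.h> apply directly. For instance, a hypothetical validation helper:

    #include <linux/etherdevice.h>

    static bool mac_usable(const u8 *mac)
    {
            /* rejects the all-zeros and multicast addresses */
            return is_valid_ether_addr(mac);
    }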
/* BFI_IOC_I2H_GETATTR_REPLY message */
struct bfi_ioc_getattr_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< cfg reply status */
u8 rsvd[3];
-};
+} __packed;
/* Firmware memory page offsets */
#define BFI_IOC_SMEM_PG0_CB (0x40)
@@ -256,7 +254,7 @@ struct bfi_ioc_fwver {
u8 build;
u8 rsvd[2];
#endif
-};
+} __packed;
struct bfi_ioc_image_hdr {
u32 signature; /*!< constant signature */
@@ -269,7 +267,7 @@ struct bfi_ioc_image_hdr {
u32 rsvd_b[2];
struct bfi_ioc_fwver fwver;
u32 md5sum[BFI_IOC_MD5SUM_SZ];
-};
+} __packed;
enum bfi_ioc_img_ver_cmp {
BFI_IOC_IMG_VER_INCOMP,
@@ -301,7 +299,7 @@ enum bfi_port_mode {
struct bfi_ioc_hbeat {
struct bfi_mhdr mh; /*!< common msg header */
u32 hb_count; /*!< current heart beat count */
-};
+} __packed;
/* IOC hardware/firmware state */
enum bfi_ioc_state {
@@ -317,8 +315,6 @@ enum bfi_ioc_state {
BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
};
-#define BFI_IOC_ENDIAN_SIG 0x12345678
-
enum {
BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
@@ -337,12 +333,6 @@ enum {
BFI_ADAPTER_ ## __prop ## _SH)
#define BFI_ADAPTER_SETP(__prop, __val) \
((__val) << BFI_ADAPTER_ ## __prop ## _SH)
-#define BFI_ADAPTER_IS_PROTO(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_PROTO)
-#define BFI_ADAPTER_IS_TTV(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_TTV)
-#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
- ((__adap_type) & BFI_ADAPTER_UNSUPP)
#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
BFI_ADAPTER_UNSUPP))
@@ -353,7 +343,7 @@ struct bfi_ioc_ctrl_req {
u16 clscode;
u16 rsvd;
u32 tv_sec;
-};
+} __packed;
/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
struct bfi_ioc_ctrl_reply {
@@ -362,7 +352,7 @@ struct bfi_ioc_ctrl_reply {
u8 port_mode; /*!< enum bfa_mode */
u8 cap_bm; /*!< capability bit mask */
u8 rsvd;
-};
+} __packed;
#define BFI_IOC_MSGSZ 8
/* H2I Messages */
@@ -372,14 +362,14 @@ union bfi_ioc_h2i_msg_u {
struct bfi_ioc_ctrl_req disable_req;
struct bfi_ioc_getattr_req getattr_req;
u32 mboxmsg[BFI_IOC_MSGSZ];
-};
+} __packed;
/* I2H Messages */
union bfi_ioc_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_reply fw_event;
u32 mboxmsg[BFI_IOC_MSGSZ];
-};
+} __packed;
/*----------------------------------------------------------------------
* MSGQ
@@ -408,7 +398,7 @@ struct bfi_msgq_mhdr {
u16 num_entries;
u8 enet_id;
u8 rsvd[1];
-};
+} __packed;
#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \
(_mh).msg_class = (_mc); \
@@ -430,21 +420,21 @@ struct bfi_msgq {
union bfi_addr_u addr;
u16 q_depth; /* Total num of entries in the queue */
u8 rsvd[2];
-};
+} __packed;
/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */
struct bfi_msgq_cfg_req {
struct bfi_mhdr mh;
struct bfi_msgq cmdq;
struct bfi_msgq rspq;
-};
+} __packed;
/* BFI_ENET_MSGQ_CFG_RSP */
struct bfi_msgq_cfg_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
-};
+} __packed;
/* BFI_MSGQ_H2I_DOORBELL */
struct bfi_msgq_h2i_db {
@@ -452,8 +442,8 @@ struct bfi_msgq_h2i_db {
union {
u16 cmdq_pi;
u16 rspq_ci;
- } idx;
-};
+ } __packed idx;
+} __packed;
/* BFI_MSGQ_I2H_DOORBELL */
struct bfi_msgq_i2h_db {
@@ -461,8 +451,8 @@ struct bfi_msgq_i2h_db {
union {
u16 rspq_pi;
u16 cmdq_ci;
- } idx;
-};
+ } __packed idx;
+} __packed;
#define BFI_CMD_COPY_SZ 28
@@ -470,14 +460,14 @@ struct bfi_msgq_i2h_db {
struct bfi_msgq_h2i_cmdq_copy_rsp {
struct bfi_mhdr mh;
u8 data[BFI_CMD_COPY_SZ];
-};
+} __packed;
/* BFI_MSGQ_I2H_CMD_COPY_REQ */
struct bfi_msgq_i2h_cmdq_copy_req {
struct bfi_mhdr mh;
u16 offset;
u16 len;
-};
+} __packed;
/*
* FLASH module specific
@@ -505,7 +495,7 @@ enum bfi_flash_i2h_msgs {
struct bfi_flash_query_req {
struct bfi_mhdr mh; /* Common msg header */
struct bfi_alen alen;
-};
+} __packed;
/*
* Flash write request
@@ -519,7 +509,7 @@ struct bfi_flash_write_req {
u8 rsv[2];
u32 offset;
u32 length;
-};
+} __packed;
/*
* Flash read request
@@ -532,7 +522,7 @@ struct bfi_flash_read_req {
u32 offset;
u32 length;
struct bfi_alen alen;
-};
+} __packed;
/*
* Flash query response
@@ -540,7 +530,7 @@ struct bfi_flash_read_req {
struct bfi_flash_query_rsp {
struct bfi_mhdr mh; /* Common msg header */
u32 status;
-};
+} __packed;
/*
* Flash read response
@@ -552,7 +542,7 @@ struct bfi_flash_read_rsp {
u8 rsv[3];
u32 status;
u32 length;
-};
+} __packed;
/*
* Flash write response
@@ -564,8 +554,6 @@ struct bfi_flash_write_rsp {
u8 rsv[3];
u32 status;
u32 length;
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFI_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index bd605bee72ee..fad651101c48 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -22,8 +22,6 @@
#include "bfi.h"
#include "bfa_defs_cna.h"
-#pragma pack(1)
-
enum bfi_port_h2i {
BFI_PORT_H2I_ENABLE_REQ = (1),
BFI_PORT_H2I_DISABLE_REQ = (2),
@@ -43,7 +41,7 @@ struct bfi_port_generic_req {
struct bfi_mhdr mh; /*!< msg header */
u32 msgtag; /*!< msgtag for reply */
u32 rsvd;
-};
+} __packed;
/* Generic RSP type */
struct bfi_port_generic_rsp {
@@ -51,13 +49,13 @@ struct bfi_port_generic_rsp {
u8 status; /*!< port enable status */
u8 rsvd[3];
u32 msgtag; /*!< msgtag for reply */
-};
+} __packed;
/* BFI_PORT_H2I_GET_STATS_REQ */
struct bfi_port_get_stats_req {
struct bfi_mhdr mh; /*!< common msg header */
union bfi_addr_u dma_addr;
-};
+} __packed;
union bfi_port_h2i_msg_u {
struct bfi_mhdr mh;
@@ -65,7 +63,7 @@ union bfi_port_h2i_msg_u {
struct bfi_port_generic_req disable_req;
struct bfi_port_get_stats_req getstats_req;
struct bfi_port_generic_req clearstats_req;
-};
+} __packed;
union bfi_port_i2h_msg_u {
struct bfi_mhdr mh;
@@ -73,7 +71,7 @@ union bfi_port_i2h_msg_u {
struct bfi_port_generic_rsp disable_rsp;
struct bfi_port_generic_rsp getstats_rsp;
struct bfi_port_generic_rsp clearstats_rsp;
-};
+} __packed;
/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
enum bfi_cee_h2i_msgs {
@@ -97,7 +95,7 @@ enum bfi_cee_i2h_msgs {
*/
struct bfi_lldp_reset_stats {
struct bfi_mhdr mh;
-};
+} __packed;
/*
* @brief H2I command structure for resetting the stats.
@@ -105,7 +103,7 @@ struct bfi_lldp_reset_stats {
*/
struct bfi_cee_reset_stats {
struct bfi_mhdr mh;
-};
+} __packed;
/*
* @brief get configuration command from host
@@ -114,7 +112,7 @@ struct bfi_cee_reset_stats {
struct bfi_cee_get_req {
struct bfi_mhdr mh;
union bfi_addr_u dma_addr;
-};
+} __packed;
/*
* @brief reply message from firmware
@@ -124,7 +122,7 @@ struct bfi_cee_get_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
-};
+} __packed;
/*
* @brief get configuration command from host
@@ -133,7 +131,7 @@ struct bfi_cee_get_rsp {
struct bfi_cee_stats_req {
struct bfi_mhdr mh;
union bfi_addr_u dma_addr;
-};
+} __packed;
/*
* @brief reply message from firmware
@@ -143,22 +141,20 @@ struct bfi_cee_stats_rsp {
struct bfi_mhdr mh;
u8 cmd_status;
u8 rsvd[3];
-};
+} __packed;
/* @brief mailbox command structures from host to firmware */
union bfi_cee_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_cee_get_req get_req;
struct bfi_cee_stats_req stats_req;
-};
+} __packed;
/* @brief mailbox message structures from firmware to host */
union bfi_cee_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_cee_get_rsp get_rsp;
struct bfi_cee_stats_rsp stats_rsp;
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFI_CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index bccca3bbadb8..d7be7ea8c7f5 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -36,8 +36,6 @@
#include "bfa_defs.h"
#include "bfi.h"
-#pragma pack(1)
-
#define BFI_ENET_CFG_MAX 32 /* Max resources per PF */
#define BFI_ENET_TXQ_PRIO_MAX 8
@@ -59,8 +57,8 @@ union bfi_addr_be_u {
struct {
u32 addr_hi; /* Most Significant 32-bits */
u32 addr_lo; /* Least Significant 32-Bits */
- } a32;
-};
+ } __packed a32;
+} __packed;
/* T X Q U E U E D E F I N E S */
/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
@@ -70,13 +68,13 @@ union bfi_addr_be_u {
#define BFI_ENET_TXQ_WI_EXTENSION (0x104) /* Extension WI */
/* TxQ Entry Control Flags */
-#define BFI_ENET_TXQ_WI_CF_FCOE_CRC (1 << 8)
-#define BFI_ENET_TXQ_WI_CF_IPID_MODE (1 << 5)
-#define BFI_ENET_TXQ_WI_CF_INS_PRIO (1 << 4)
-#define BFI_ENET_TXQ_WI_CF_INS_VLAN (1 << 3)
-#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM (1 << 2)
-#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM (1 << 1)
-#define BFI_ENET_TXQ_WI_CF_IP_CKSUM (1 << 0)
+#define BFI_ENET_TXQ_WI_CF_FCOE_CRC BIT(8)
+#define BFI_ENET_TXQ_WI_CF_IPID_MODE BIT(5)
+#define BFI_ENET_TXQ_WI_CF_INS_PRIO BIT(4)
+#define BFI_ENET_TXQ_WI_CF_INS_VLAN BIT(3)
+#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM BIT(2)
+#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM BIT(1)
+#define BFI_ENET_TXQ_WI_CF_IP_CKSUM BIT(0)
struct bfi_enet_txq_wi_base {
u8 reserved;
@@ -88,28 +86,28 @@ struct bfi_enet_txq_wi_base {
u16 vlan_tag;
u16 lso_mss; /* Only 14 LSB are valid */
u32 frame_length; /* Only 24 LSB are valid */
-};
+} __packed;
struct bfi_enet_txq_wi_ext {
u16 reserved;
u16 opcode; /* BFI_ENET_TXQ_WI_EXTENSION */
u32 reserved2[3];
-};
+} __packed;
struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
u16 reserved;
u16 length; /* Only 14 LSB are valid */
union bfi_addr_be_u addr;
-};
+} __packed;
/* TxQ Entry Structure */
struct bfi_enet_txq_entry {
union {
struct bfi_enet_txq_wi_base base;
struct bfi_enet_txq_wi_ext ext;
- } wi;
+ } __packed wi;
struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX];
-};
+} __packed;
#define wi_hdr wi.base
#define wi_ext_hdr wi.ext
@@ -120,36 +118,36 @@ struct bfi_enet_txq_entry {
/* R X Q U E U E D E F I N E S */
struct bfi_enet_rxq_entry {
union bfi_addr_be_u rx_buffer;
-};
+} __packed;
/* R X C O M P L E T I O N Q U E U E D E F I N E S */
/* CQ Entry Flags */
-#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
-#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
-#define BFI_ENET_CQ_EF_TOO_LONG (1 << 2)
-#define BFI_ENET_CQ_EF_FC_CRC_OK (1 << 3)
+#define BFI_ENET_CQ_EF_MAC_ERROR BIT(0)
+#define BFI_ENET_CQ_EF_FCS_ERROR BIT(1)
+#define BFI_ENET_CQ_EF_TOO_LONG BIT(2)
+#define BFI_ENET_CQ_EF_FC_CRC_OK BIT(3)
-#define BFI_ENET_CQ_EF_RSVD1 (1 << 4)
-#define BFI_ENET_CQ_EF_L4_CKSUM_OK (1 << 5)
-#define BFI_ENET_CQ_EF_L3_CKSUM_OK (1 << 6)
-#define BFI_ENET_CQ_EF_HDS_HEADER (1 << 7)
+#define BFI_ENET_CQ_EF_RSVD1 BIT(4)
+#define BFI_ENET_CQ_EF_L4_CKSUM_OK BIT(5)
+#define BFI_ENET_CQ_EF_L3_CKSUM_OK BIT(6)
+#define BFI_ENET_CQ_EF_HDS_HEADER BIT(7)
-#define BFI_ENET_CQ_EF_UDP (1 << 8)
-#define BFI_ENET_CQ_EF_TCP (1 << 9)
-#define BFI_ENET_CQ_EF_IP_OPTIONS (1 << 10)
-#define BFI_ENET_CQ_EF_IPV6 (1 << 11)
+#define BFI_ENET_CQ_EF_UDP BIT(8)
+#define BFI_ENET_CQ_EF_TCP BIT(9)
+#define BFI_ENET_CQ_EF_IP_OPTIONS BIT(10)
+#define BFI_ENET_CQ_EF_IPV6 BIT(11)
-#define BFI_ENET_CQ_EF_IPV4 (1 << 12)
-#define BFI_ENET_CQ_EF_VLAN (1 << 13)
-#define BFI_ENET_CQ_EF_RSS (1 << 14)
-#define BFI_ENET_CQ_EF_RSVD2 (1 << 15)
+#define BFI_ENET_CQ_EF_IPV4 BIT(12)
+#define BFI_ENET_CQ_EF_VLAN BIT(13)
+#define BFI_ENET_CQ_EF_RSS BIT(14)
+#define BFI_ENET_CQ_EF_RSVD2 BIT(15)
-#define BFI_ENET_CQ_EF_MCAST_MATCH (1 << 16)
-#define BFI_ENET_CQ_EF_MCAST (1 << 17)
-#define BFI_ENET_CQ_EF_BCAST (1 << 18)
-#define BFI_ENET_CQ_EF_REMOTE (1 << 19)
+#define BFI_ENET_CQ_EF_MCAST_MATCH BIT(16)
+#define BFI_ENET_CQ_EF_MCAST BIT(17)
+#define BFI_ENET_CQ_EF_BCAST BIT(18)
+#define BFI_ENET_CQ_EF_REMOTE BIT(19)
-#define BFI_ENET_CQ_EF_LOCAL (1 << 20)
+#define BFI_ENET_CQ_EF_LOCAL BIT(20)
/* CQ Entry Structure */
struct bfi_enet_cq_entry {
@@ -161,7 +159,7 @@ struct bfi_enet_cq_entry {
u8 reserved1;
u8 reserved2;
u8 rxq_id;
-};
+} __packed;
/* E N E T C O N T R O L P A T H C O M M A N D S */
struct bfi_enet_q {
@@ -169,23 +167,23 @@ struct bfi_enet_q {
union bfi_addr_u first_entry;
u16 pages; /* # of pages */
u16 page_sz;
-};
+} __packed;
struct bfi_enet_txq {
struct bfi_enet_q q;
u8 priority;
u8 rsvd[3];
-};
+} __packed;
struct bfi_enet_rxq {
struct bfi_enet_q q;
u16 rx_buffer_size;
u16 rsvd;
-};
+} __packed;
struct bfi_enet_cq {
struct bfi_enet_q q;
-};
+} __packed;
struct bfi_enet_ib_cfg {
u8 int_pkt_dma;
@@ -198,16 +196,16 @@ struct bfi_enet_ib_cfg {
u32 inter_pkt_timeout;
u8 inter_pkt_count;
u8 rsvd1[3];
-};
+} __packed;
struct bfi_enet_ib {
union bfi_addr_u index_addr;
union {
u16 msix_index;
u16 intx_bitmask;
- } intr;
+ } __packed intr;
u16 rsvd;
-};
+} __packed;
/* ENET command messages */
enum bfi_enet_h2i_msgs {
@@ -355,7 +353,7 @@ enum bfi_enet_err {
*/
struct bfi_enet_req {
struct bfi_msgq_mhdr mh;
-};
+} __packed;
/* Enable/Disable Request
*
@@ -370,7 +368,7 @@ struct bfi_enet_enable_req {
struct bfi_msgq_mhdr mh;
u8 enable; /* 1 = enable; 0 = disable */
u8 rsvd[3];
-};
+} __packed;
/* Generic Response */
struct bfi_enet_rsp {
@@ -378,7 +376,7 @@ struct bfi_enet_rsp {
u8 error; /*!< if error see cmd_offset */
u8 rsvd;
u16 cmd_offset; /*!< offset to invalid parameter */
-};
+} __packed;
/* GLOBAL CONFIGURATION */
@@ -387,7 +385,7 @@ struct bfi_enet_rsp {
*/
struct bfi_enet_attr_req {
struct bfi_msgq_mhdr mh;
-};
+} __packed;
/* bfi_enet_attr_rsp is used by:
* BFI_ENET_I2H_GET_ATTR_RSP
@@ -400,7 +398,7 @@ struct bfi_enet_attr_rsp {
u32 max_cfg;
u32 max_ucmac;
u32 rit_size;
-};
+} __packed;
/* Tx Configuration
*
@@ -421,7 +419,7 @@ struct bfi_enet_tx_cfg {
u8 apply_vlan_filter;
u8 add_to_vswitch;
u8 rsvd1[1];
-};
+} __packed;
struct bfi_enet_tx_cfg_req {
struct bfi_msgq_mhdr mh;
@@ -431,7 +429,7 @@ struct bfi_enet_tx_cfg_req {
struct {
struct bfi_enet_txq q;
struct bfi_enet_ib ib;
- } q_cfg[BFI_ENET_TXQ_PRIO_MAX];
+ } __packed q_cfg[BFI_ENET_TXQ_PRIO_MAX];
struct bfi_enet_ib_cfg ib_cfg;
@@ -448,7 +446,7 @@ struct bfi_enet_tx_cfg_rsp {
u32 i_dbell; /* PCI base address offset */
u8 hw_qid; /* For debugging */
u8 rsvd[3];
- } q_handles[BFI_ENET_TXQ_PRIO_MAX];
+ } __packed q_handles[BFI_ENET_TXQ_PRIO_MAX];
};
/* Rx Configuration
@@ -481,13 +479,13 @@ struct bfi_enet_rx_cfg {
u8 force_offset;
u8 type;
u8 rsvd1;
- } hds;
+ } __packed hds;
u8 multi_buffer;
u8 strip_vlan;
u8 drop_untagged;
u8 rsvd2;
-};
+} __packed;
/*
* Multicast frames are received on the ql of q-set index zero.
@@ -504,12 +502,12 @@ struct bfi_enet_rx_cfg_req {
struct bfi_enet_rxq qs; /* small/header buffers */
struct bfi_enet_cq cq;
struct bfi_enet_ib ib;
- } q_cfg[BFI_ENET_RX_QSET_MAX];
+ } __packed q_cfg[BFI_ENET_RX_QSET_MAX];
struct bfi_enet_ib_cfg ib_cfg;
struct bfi_enet_rx_cfg rx_cfg;
-};
+} __packed;
struct bfi_enet_rx_cfg_rsp {
struct bfi_msgq_mhdr mh;
@@ -524,8 +522,8 @@ struct bfi_enet_rx_cfg_rsp {
u8 hw_sqid; /* For debugging */
u8 hw_cqid; /* For debugging */
u8 rsvd;
- } q_handles[BFI_ENET_RX_QSET_MAX];
-};
+ } __packed q_handles[BFI_ENET_RX_QSET_MAX];
+} __packed;
/* RIT
*
@@ -537,7 +535,7 @@ struct bfi_enet_rit_req {
u16 size; /* number of table-entries used */
u8 rsvd[2];
u8 table[BFI_ENET_RSS_RIT_MAX];
-};
+} __packed;
/* RSS
*
@@ -556,12 +554,12 @@ struct bfi_enet_rss_cfg {
u8 mask;
u8 rsvd[2];
u32 key[BFI_ENET_RSS_KEY_LEN];
-};
+} __packed;
struct bfi_enet_rss_cfg_req {
struct bfi_msgq_mhdr mh;
struct bfi_enet_rss_cfg cfg;
-};
+} __packed;
/* MAC Unicast
*
@@ -573,16 +571,16 @@ struct bfi_enet_rss_cfg_req {
*/
struct bfi_enet_ucast_req {
struct bfi_msgq_mhdr mh;
- mac_t mac_addr;
+ u8 mac_addr[ETH_ALEN];
u8 rsvd[2];
-};
+} __packed;
/* MAC Unicast + VLAN */
struct bfi_enet_mac_n_vlan_req {
struct bfi_msgq_mhdr mh;
u16 vlan_id;
- mac_t mac_addr;
-};
+ u8 mac_addr[ETH_ALEN];
+} __packed;
/* MAC Multicast
*
@@ -591,9 +589,9 @@ struct bfi_enet_mac_n_vlan_req {
*/
struct bfi_enet_mcast_add_req {
struct bfi_msgq_mhdr mh;
- mac_t mac_addr;
+ u8 mac_addr[ETH_ALEN];
u8 rsvd[2];
-};
+} __packed;
/* bfi_enet_mac_mfilter_add_rsp is used by:
* BFI_ENET_I2H_MAC_MCAST_ADD_RSP
@@ -605,7 +603,7 @@ struct bfi_enet_mcast_add_rsp {
u16 cmd_offset;
u16 handle;
u8 rsvd1[2];
-};
+} __packed;
/* bfi_enet_mac_mfilter_del_req is used by:
* BFI_ENET_H2I_MAC_MCAST_DEL_REQ
@@ -614,7 +612,7 @@ struct bfi_enet_mcast_del_req {
struct bfi_msgq_mhdr mh;
u16 handle;
u8 rsvd[2];
-};
+} __packed;
/* VLAN
*
@@ -626,7 +624,7 @@ struct bfi_enet_rx_vlan_req {
u8 block_idx;
u8 rsvd[3];
u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
-};
+} __packed;
/* PAUSE
*
@@ -638,7 +636,7 @@ struct bfi_enet_set_pause_req {
u8 rsvd[2];
u8 tx_pause; /* 1 = enable; 0 = disable */
u8 rx_pause; /* 1 = enable; 0 = disable */
-};
+} __packed;
/* DIAGNOSTICS
*
@@ -650,7 +648,7 @@ struct bfi_enet_diag_lb_req {
u8 rsvd[2];
u8 mode; /* cable or Serdes */
u8 enable; /* 1 = enable; 0 = disable */
-};
+} __packed;
/* enum for Loopback opmodes */
enum {
@@ -671,14 +669,14 @@ struct bfi_enet_stats_req {
u32 rx_enet_mask;
u32 tx_enet_mask;
union bfi_addr_u host_buffer;
-};
+} __packed;
/* defines for "stats_mask" above. */
-#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
-#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
-#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
-#define BFI_ENET_STATS_RX_FC (1 << 3) /* !< Rx FC Stats from RxA */
-#define BFI_ENET_STATS_TX_FC (1 << 4) /* !< Tx FC Stats from TxA */
+#define BFI_ENET_STATS_MAC BIT(0) /* !< MAC Statistics */
+#define BFI_ENET_STATS_BPC BIT(1) /* !< Pause Stats from BPC */
+#define BFI_ENET_STATS_RAD BIT(2) /* !< Rx Admission Statistics */
+#define BFI_ENET_STATS_RX_FC BIT(3) /* !< Rx FC Stats from RxA */
+#define BFI_ENET_STATS_TX_FC BIT(4) /* !< Tx FC Stats from TxA */
#define BFI_ENET_STATS_ALL 0x1f
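
The five stats flags occupy contiguous bits, so the ALL mask is simply the OR of BIT(0)..BIT(4); the same value could also be written as a range mask, e.g.:

    #include <linux/bitops.h>

    #define STATS_ALL       GENMASK(4, 0)   /* == 0x1f, bits 0..4 set */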
@@ -699,7 +697,7 @@ struct bfi_enet_stats_txf {
u64 errors;
u64 filter_vlan; /* frames filtered due to VLAN */
u64 filter_mac_sa; /* frames filtered due to SA check */
-};
+} __packed;
/* RxF Frame Statistics */
struct bfi_enet_stats_rxf {
@@ -715,7 +713,7 @@ struct bfi_enet_stats_rxf {
u64 bcast;
u64 bcast_vlan;
u64 frame_drops;
-};
+} __packed;
/* FC Tx Frame Statistics */
struct bfi_enet_stats_fc_tx {
@@ -734,7 +732,7 @@ struct bfi_enet_stats_fc_tx {
u64 txf_parity_errors;
u64 txf_timeout;
u64 txf_fid_parity_errors;
-};
+} __packed;
/* FC Rx Frame Statistics */
struct bfi_enet_stats_fc_rx {
@@ -749,7 +747,7 @@ struct bfi_enet_stats_fc_rx {
u64 rxf_bcast_octets;
u64 rxf_bcast;
u64 rxf_bcast_vlan;
-};
+} __packed;
/* RAD Frame Statistics */
struct bfi_enet_stats_rad {
@@ -770,7 +768,7 @@ struct bfi_enet_stats_rad {
u64 rx_bcast_vlan;
u64 rx_drops;
-};
+} __packed;
/* BPC Tx Registers */
struct bfi_enet_stats_bpc {
@@ -785,7 +783,7 @@ struct bfi_enet_stats_bpc {
u64 rx_zero_pause[8]; /*!< Pause cancellation */
/*!<Pause initiation rather than retention */
u64 rx_first_pause[8];
-};
+} __packed;
/* MAC Rx Statistics */
struct bfi_enet_stats_mac {
@@ -838,7 +836,7 @@ struct bfi_enet_stats_mac {
u64 tx_oversize;
u64 tx_undersize;
u64 tx_fragments;
-};
+} __packed;
/* Complete statistics, DMAed from fw to host followed by
* BFI_ENET_I2H_STATS_GET_RSP
@@ -852,8 +850,6 @@ struct bfi_enet_stats {
struct bfi_enet_stats_fc_tx fc_tx_stats;
struct bfi_enet_stats_rxf rxf_stats[BFI_ENET_CFG_MAX];
struct bfi_enet_stats_txf txf_stats[BFI_ENET_CFG_MAX];
-};
-
-#pragma pack()
+} __packed;
#endif /* __BFI_ENET_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 8ba72b1f36d9..dc845b2571da 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -28,36 +28,8 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
/* Macros and constants */
-#define BNA_IOC_TIMER_FREQ 200
-
-/* Log string size */
-#define BNA_MESSAGE_SIZE 256
-
#define bna_is_small_rxq(_id) ((_id) & 0x1)
-#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
- (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
-
-#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
-
-#define BNA_TO_POWER_OF_2(x) \
-do { \
- int _shift = 0; \
- while ((x) && (x) != 1) { \
- (x) >>= 1; \
- _shift++; \
- } \
- (x) <<= _shift; \
-} while (0)
-
-#define BNA_TO_POWER_OF_2_HIGH(x) \
-do { \
- int n = 1; \
- while (n < (x)) \
- n <<= 1; \
- (x) = n; \
-} while (0)
-
/*
* input : _addr-> os dma addr in host endian format,
* output : _bna_dma_addr-> pointer to hw dma addr
@@ -80,62 +52,8 @@ do { \
| ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
} while (0)
-#define containing_rec(addr, type, field) \
- ((type *)((unsigned char *)(addr) - \
- (unsigned char *)(&((type *)0)->field)))
-
#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
-/* TxQ element is 64 bytes */
-#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
-#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
-
-#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
- (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
-}
-
-/* RxQ element is 8 bytes */
-#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
-#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
-
-#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> \
- BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
- (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
-}
-
-/* CQ element is 16 bytes */
-#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
-#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
-
-#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
-{ \
- unsigned int page_index; /* index within a page */ \
- void *page_addr; \
- \
- page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
- page_addr = (_qpt_ptr)[((_qe_idx) >> \
- BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
- (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
-}
-
-#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
- (&((_cast *)(_q_base))[(_qe_idx)])
-
-#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
-
#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
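
BNA_QE_INDX_ADD (kept above) and the *_CNT macros below all rely on the queue depth being a power of two, so the AND with depth-1 is a cheap modulo; guaranteeing that property is what the deleted BNA_TO_POWER_OF_2* helpers were for. The arithmetic, in isolation:

    /* advance a ring index; depth must be a power of two */
    #define RING_IDX_ADD(idx, n, depth) \
            ((idx) = ((idx) + (n)) & ((depth) - 1))

    /* depth = 8: index 6 advanced by 3 wraps to 1, i.e. (6 + 3) % 8 */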
@@ -147,31 +65,10 @@ do { \
#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
((_q_depth) - 1))
-
#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
(_q_depth - 1))
-#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
-
-#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
-
-#define BNA_Q_PI_ADD(_q_ptr, _num) \
- (_q_ptr)->q.producer_index = \
- (((_q_ptr)->q.producer_index + (_num)) & \
- ((_q_ptr)->q.q_depth - 1))
-
-#define BNA_Q_CI_ADD(_q_ptr, _num) \
- (_q_ptr)->q.consumer_index = \
- (((_q_ptr)->q.consumer_index + (_num)) \
- & ((_q_ptr)->q.q_depth - 1))
-
-#define BNA_Q_FREE_COUNT(_q_ptr) \
- (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
-
-#define BNA_Q_IN_USE_COUNT(_q_ptr) \
- (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
-
#define BNA_LARGE_PKT_SIZE 1000
#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
@@ -222,21 +119,6 @@ do { \
} \
} while (0)
-#define call_rxf_pause_cbfn(rxf) \
-do { \
- if ((rxf)->oper_state_cbfn) { \
- void (*cbfn)(struct bnad *, struct bna_rx *); \
- struct bnad *cbarg; \
- cbfn = (rxf)->oper_state_cbfn; \
- cbarg = (rxf)->oper_state_cbarg; \
- (rxf)->oper_state_cbfn = NULL; \
- (rxf)->oper_state_cbarg = NULL; \
- cbfn(cbarg, rxf->rx); \
- } \
-} while (0)
-
-#define call_rxf_resume_cbfn(rxf) call_rxf_pause_cbfn(rxf)
-
#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))
#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))
@@ -326,28 +208,24 @@ do { \
#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask)
#define bna_tx_from_rid(_bna, _rid, _tx) \
-do { \
- struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
- struct bna_tx *__tx; \
- struct list_head *qe; \
- _tx = NULL; \
- list_for_each(qe, &__tx_mod->tx_active_q) { \
- __tx = (struct bna_tx *)qe; \
- if (__tx->rid == (_rid)) { \
- (_tx) = __tx; \
- break; \
- } \
- } \
+do { \
+ struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \
+ struct bna_tx *__tx; \
+ _tx = NULL; \
+ list_for_each_entry(__tx, &__tx_mod->tx_active_q, qe) { \
+ if (__tx->rid == (_rid)) { \
+ (_tx) = __tx; \
+ break; \
+ } \
+ } \
} while (0)
#define bna_rx_from_rid(_bna, _rid, _rx) \
do { \
struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \
struct bna_rx *__rx; \
- struct list_head *qe; \
_rx = NULL; \
- list_for_each(qe, &__rx_mod->rx_active_q) { \
- __rx = (struct bna_rx *)qe; \
+ list_for_each_entry(__rx, &__rx_mod->rx_active_q, qe) { \
if (__rx->rid == (_rid)) { \
(_rx) = __rx; \
break; \
@@ -367,15 +245,12 @@ do { \
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
- struct bna_mac *mac = NULL;
- struct list_head *qe;
- list_for_each(qe, q) {
- if (BNA_MAC_IS_EQUAL(((struct bna_mac *)qe)->addr, addr)) {
- mac = (struct bna_mac *)qe;
- break;
- }
- }
- return mac;
+ struct bna_mac *mac;
+
+ list_for_each_entry(mac, q, qe)
+ if (ether_addr_equal(mac->addr, addr))
+ return mac;
+ return NULL;
}
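
ether_addr_equal() replaces the memcmp-based BNA_MAC_IS_EQUAL macro deleted earlier in this header; like ether_addr_copy() it assumes 2-byte-aligned operands (ether_addr_equal_unaligned() exists for the general case). The rewrite also returns from inside the loop instead of breaking out with a sentinel, the usual shape for list lookups. A self-contained illustration of the comparison:

    #include <linux/etherdevice.h>

    static bool demo_equal(void)
    {
            u8 a[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            u8 b[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

            return ether_addr_equal(a, b);  /* true: all six bytes match */
    }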
#define bna_attr(_bna) (&(_bna)->ioceth.attr)
@@ -401,7 +276,6 @@ void bna_hw_stats_get(struct bna *bna);
/* APIs for RxF */
struct bna_mac *bna_cam_mod_mac_get(struct list_head *head);
-void bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac);
struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
@@ -489,30 +363,19 @@ void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);
enum bna_cb_status
-bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
-enum bna_cb_status
-bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac);
enum bna_cb_status
-bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
- void (*cbfn)(struct bnad *, struct bna_rx *));
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist);
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
void (*cbfn)(struct bnad *, struct bna_rx *));
enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
- void (*cbfn)(struct bnad *, struct bna_rx *));
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac);
void
-bna_rx_mcast_delall(struct bna_rx *rx,
- void (*cbfn)(struct bnad *, struct bna_rx *));
+bna_rx_mcast_delall(struct bna_rx *rx);
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
- enum bna_rxmode bitmask,
- void (*cbfn)(struct bnad *, struct bna_rx *));
+ enum bna_rxmode bitmask);
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
@@ -532,11 +395,10 @@ void bna_enet_enable(struct bna_enet *enet);
void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
void (*cbfn)(void *));
void bna_enet_pause_config(struct bna_enet *enet,
- struct bna_pause_config *pause_config,
- void (*cbfn)(struct bnad *));
+ struct bna_pause_config *pause_config);
void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
void (*cbfn)(struct bnad *));
-void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
+void bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac);
/* IOCETH */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index deb8da6ab9cc..05680e081b4f 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -207,7 +207,7 @@ bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
- if (rx_enet_mask & ((u32)(1 << i))) {
+ if (rx_enet_mask & ((u32)BIT(i))) {
int k;
count = sizeof(struct bfi_enet_stats_rxf) /
sizeof(u64);
@@ -222,7 +222,7 @@ bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
- if (tx_enet_mask & ((u32)(1 << i))) {
+ if (tx_enet_mask & ((u32)BIT(i))) {
int k;
count = sizeof(struct bfi_enet_stats_txf) /
sizeof(u64);
@@ -884,16 +884,6 @@ do { \
} \
} while (0)
-#define call_enet_pause_cbfn(enet) \
-do { \
- if ((enet)->pause_cbfn) { \
- void (*cbfn)(struct bnad *); \
- cbfn = (enet)->pause_cbfn; \
- (enet)->pause_cbfn = NULL; \
- cbfn((enet)->bna->bnad); \
- } \
-} while (0)
-
#define call_enet_mtu_cbfn(enet) \
do { \
if ((enet)->mtu_cbfn) { \
@@ -925,7 +915,6 @@ bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
- call_enet_pause_cbfn(enet);
call_enet_mtu_cbfn(enet);
call_enet_stop_cbfn(enet);
}
@@ -947,7 +936,6 @@ bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
break;
case ENET_E_PAUSE_CFG:
- call_enet_pause_cbfn(enet);
break;
case ENET_E_MTU_CFG:
@@ -1039,7 +1027,6 @@ bna_enet_sm_started_entry(struct bna_enet *enet)
* NOTE: Do not call bna_enet_chld_start() here, since it will be
* inadvertently called during cfg_wait->started transition as well
*/
- call_enet_pause_cbfn(enet);
call_enet_mtu_cbfn(enet);
}
@@ -1211,8 +1198,6 @@ bna_enet_init(struct bna_enet *enet, struct bna *bna)
enet->stop_cbfn = NULL;
enet->stop_cbarg = NULL;
- enet->pause_cbfn = NULL;
-
enet->mtu_cbfn = NULL;
bfa_fsm_set_state(enet, bna_enet_sm_stopped);
@@ -1308,13 +1293,10 @@ bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
void
bna_enet_pause_config(struct bna_enet *enet,
- struct bna_pause_config *pause_config,
- void (*cbfn)(struct bnad *))
+ struct bna_pause_config *pause_config)
{
enet->pause_config = *pause_config;
- enet->pause_cbfn = cbfn;
-
bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}
@@ -1330,9 +1312,9 @@ bna_enet_mtu_set(struct bna_enet *enet, int mtu,
}
void
-bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
+bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
{
- *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
+ bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
}
/* IOCETH */
@@ -1810,17 +1792,13 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&ucam_mod->free_q);
- for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
- bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
- }
/* A separate queue to allow synchronous setting of a list of MACs */
INIT_LIST_HEAD(&ucam_mod->del_q);
- for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
- bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
+ for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++)
list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
- }
ucam_mod->bna = bna;
}
@@ -1828,17 +1806,6 @@ bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &ucam_mod->free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &ucam_mod->del_q)
- i++;
-
ucam_mod->bna = NULL;
}
@@ -1852,27 +1819,21 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&mcam_mod->free_q);
- for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
- bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
- }
mcam_mod->mchandle = (struct bna_mcam_handle *)
res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&mcam_mod->free_handle_q);
- for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
- bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
+ for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
list_add_tail(&mcam_mod->mchandle[i].qe,
- &mcam_mod->free_handle_q);
- }
+ &mcam_mod->free_handle_q);
/* A separate queue to allow synchronous setting of a list of MACs */
INIT_LIST_HEAD(&mcam_mod->del_q);
- for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
- bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
+ for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++)
list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
- }
mcam_mod->bna = bna;
}
@@ -1880,18 +1841,6 @@ bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &mcam_mod->free_q) i++;
-
- i = 0;
- list_for_each(qe, &mcam_mod->del_q) i++;
-
- i = 0;
- list_for_each(qe, &mcam_mod->free_handle_q) i++;
-
mcam_mod->bna = NULL;
}
@@ -2108,32 +2057,26 @@ bna_num_rxp_set(struct bna *bna, int num_rxp)
struct bna_mac *
bna_cam_mod_mac_get(struct list_head *head)
{
- struct list_head *qe;
-
- if (list_empty(head))
- return NULL;
+ struct bna_mac *mac;
- bfa_q_deq(head, &qe);
- return (struct bna_mac *)qe;
-}
+ mac = list_first_entry_or_null(head, struct bna_mac, qe);
+ if (mac)
+ list_del(&mac->qe);
-void
-bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
-{
- list_add_tail(&mac->qe, tail);
+ return mac;
}
struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
- struct list_head *qe;
-
- if (list_empty(&mcam_mod->free_handle_q))
- return NULL;
+ struct bna_mcam_handle *handle;
- bfa_q_deq(&mcam_mod->free_handle_q, &qe);
+ handle = list_first_entry_or_null(&mcam_mod->free_handle_q,
+ struct bna_mcam_handle, qe);
+ if (handle)
+ list_del(&handle->qe);
- return (struct bna_mcam_handle *)qe;
+ return handle;
}
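
Both getters now collapse the empty-check and head access into list_first_entry_or_null(), which returns NULL for an empty list, so only a successful lookup pays for the list_del(). The pop idiom, again with a hypothetical `struct item { struct list_head qe; }`:

    static struct item *pop_item(struct list_head *q)
    {
            struct item *it;

            it = list_first_entry_or_null(q, struct item, qe);
            if (it)
                    list_del(&it->qe);      /* caller now owns the entry */
            return it;
    }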
void
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 174af0e9d056..52b45c9935aa 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -213,7 +213,7 @@ do { \
* 15 bits (32K) should be large enough to accumulate, anyways, and the max.
* acked events to h/w can be (32K + max poll weight) (currently 64).
*/
-#define BNA_IB_MAX_ACK_EVENTS (1 << 15)
+#define BNA_IB_MAX_ACK_EVENTS BIT(15)
/* These macros build the data portion of the TxQ/RxQ doorbell */
#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
@@ -282,13 +282,13 @@ do { \
#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
/* TxQ Entry Control Flags */
-#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
-#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
-#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
-#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
-#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
-#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
-#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
+#define BNA_TXQ_WI_CF_FCOE_CRC BIT(8)
+#define BNA_TXQ_WI_CF_IPID_MODE BIT(5)
+#define BNA_TXQ_WI_CF_INS_PRIO BIT(4)
+#define BNA_TXQ_WI_CF_INS_VLAN BIT(3)
+#define BNA_TXQ_WI_CF_UDP_CKSUM BIT(2)
+#define BNA_TXQ_WI_CF_TCP_CKSUM BIT(1)
+#define BNA_TXQ_WI_CF_IP_CKSUM BIT(0)
#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
(((_hdr_size) << 10) | ((_offset) & 0x3FF))
@@ -297,36 +297,36 @@ do { \
* Completion Q defines
*/
/* CQ Entry Flags */
-#define BNA_CQ_EF_MAC_ERROR (1 << 0)
-#define BNA_CQ_EF_FCS_ERROR (1 << 1)
-#define BNA_CQ_EF_TOO_LONG (1 << 2)
-#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
-
-#define BNA_CQ_EF_RSVD1 (1 << 4)
-#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
-#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
-#define BNA_CQ_EF_HDS_HEADER (1 << 7)
-
-#define BNA_CQ_EF_UDP (1 << 8)
-#define BNA_CQ_EF_TCP (1 << 9)
-#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
-#define BNA_CQ_EF_IPV6 (1 << 11)
-
-#define BNA_CQ_EF_IPV4 (1 << 12)
-#define BNA_CQ_EF_VLAN (1 << 13)
-#define BNA_CQ_EF_RSS (1 << 14)
-#define BNA_CQ_EF_RSVD2 (1 << 15)
-
-#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
-#define BNA_CQ_EF_MCAST (1 << 17)
-#define BNA_CQ_EF_BCAST (1 << 18)
-#define BNA_CQ_EF_REMOTE (1 << 19)
-
-#define BNA_CQ_EF_LOCAL (1 << 20)
+#define BNA_CQ_EF_MAC_ERROR BIT(0)
+#define BNA_CQ_EF_FCS_ERROR BIT(1)
+#define BNA_CQ_EF_TOO_LONG BIT(2)
+#define BNA_CQ_EF_FC_CRC_OK BIT(3)
+
+#define BNA_CQ_EF_RSVD1 BIT(4)
+#define BNA_CQ_EF_L4_CKSUM_OK BIT(5)
+#define BNA_CQ_EF_L3_CKSUM_OK BIT(6)
+#define BNA_CQ_EF_HDS_HEADER BIT(7)
+
+#define BNA_CQ_EF_UDP BIT(8)
+#define BNA_CQ_EF_TCP BIT(9)
+#define BNA_CQ_EF_IP_OPTIONS BIT(10)
+#define BNA_CQ_EF_IPV6 BIT(11)
+
+#define BNA_CQ_EF_IPV4 BIT(12)
+#define BNA_CQ_EF_VLAN BIT(13)
+#define BNA_CQ_EF_RSS BIT(14)
+#define BNA_CQ_EF_RSVD2 BIT(15)
+
+#define BNA_CQ_EF_MCAST_MATCH BIT(16)
+#define BNA_CQ_EF_MCAST BIT(17)
+#define BNA_CQ_EF_BCAST BIT(18)
+#define BNA_CQ_EF_REMOTE BIT(19)
+
+#define BNA_CQ_EF_LOCAL BIT(20)
/* CAT2 ASIC does not use bit 21 as per the SPEC.
* Bit 31 is set in every end of frame completion
*/
-#define BNA_CQ_EF_EOP (1 << 31)
+#define BNA_CQ_EF_EOP BIT(31)
/* Data structures */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 8ab3a5f62706..64eb8c48fe79 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -46,7 +46,6 @@ do { \
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
-static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
@@ -60,14 +59,10 @@ static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
- enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
enum bna_rxf_event);
-bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
- enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
enum bna_rxf_event);
@@ -82,11 +77,7 @@ bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_START:
- if (rxf->flags & BNA_RXF_F_PAUSED) {
- bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
- call_rxf_start_cbfn(rxf);
- } else
- bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
+ bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
case RXF_E_STOP:
@@ -101,45 +92,6 @@ bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
call_rxf_cam_fltr_cbfn(rxf);
break;
- case RXF_E_PAUSE:
- rxf->flags |= BNA_RXF_F_PAUSED;
- call_rxf_pause_cbfn(rxf);
- break;
-
- case RXF_E_RESUME:
- rxf->flags &= ~BNA_RXF_F_PAUSED;
- call_rxf_resume_cbfn(rxf);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
-{
- call_rxf_pause_cbfn(rxf);
-}
-
-static void
-bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_STOP:
- case RXF_E_FAIL:
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_CONFIG:
- call_rxf_cam_fltr_cbfn(rxf);
- break;
-
- case RXF_E_RESUME:
- rxf->flags &= ~BNA_RXF_F_PAUSED;
- bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
- break;
-
default:
bfa_sm_fault(event);
}
@@ -166,7 +118,6 @@ bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
bna_rxf_cfg_reset(rxf);
call_rxf_start_cbfn(rxf);
call_rxf_cam_fltr_cbfn(rxf);
- call_rxf_resume_cbfn(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
@@ -174,12 +125,6 @@ bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
/* No-op */
break;
- case RXF_E_PAUSE:
- rxf->flags |= BNA_RXF_F_PAUSED;
- call_rxf_start_cbfn(rxf);
- bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
- break;
-
case RXF_E_FW_RESP:
if (!bna_rxf_cfg_apply(rxf)) {
/* No more pending config updates */
@@ -197,7 +142,6 @@ bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
call_rxf_start_cbfn(rxf);
call_rxf_cam_fltr_cbfn(rxf);
- call_rxf_resume_cbfn(rxf);
}
static void
@@ -214,41 +158,6 @@ bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
break;
- case RXF_E_PAUSE:
- rxf->flags |= BNA_RXF_F_PAUSED;
- if (!bna_rxf_fltr_clear(rxf))
- bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
- else
- bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
- break;
-
- default:
- bfa_sm_fault(event);
- }
-}
-
-static void
-bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
-{
-}
-
-static void
-bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
-{
- switch (event) {
- case RXF_E_FAIL:
- bna_rxf_cfg_reset(rxf);
- call_rxf_pause_cbfn(rxf);
- bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
- break;
-
- case RXF_E_FW_RESP:
- if (!bna_rxf_fltr_clear(rxf)) {
- /* No more pending CAM entries to clear */
- bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
- }
- break;
-
default:
bfa_sm_fault(event);
}
@@ -283,7 +192,7 @@ bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
- memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+ ether_addr_copy(req->mac_addr, mac->addr);
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_ucast_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
@@ -299,7 +208,7 @@ bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
0, rxf->rx->rid);
req->mh.num_entries = htons(
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
- memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
+ ether_addr_copy(req->mac_addr, mac->addr);
bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
sizeof(struct bfi_enet_mcast_add_req), &req->mh);
bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
@@ -447,19 +356,14 @@ static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
struct bna_mac *mac;
- struct list_head *qe;
- list_for_each(qe, &rxf->mcast_active_q) {
- mac = (struct bna_mac *)qe;
- if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+ list_for_each_entry(mac, &rxf->mcast_active_q, qe)
+ if (ether_addr_equal(mac->addr, mac_addr))
return mac;
- }
- list_for_each(qe, &rxf->mcast_pending_del_q) {
- mac = (struct bna_mac *)qe;
- if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
+ list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe)
+ if (ether_addr_equal(mac->addr, mac_addr))
return mac;
- }
return NULL;
}
@@ -468,13 +372,10 @@ static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
struct bna_mcam_handle *mchandle;
- struct list_head *qe;
- list_for_each(qe, &rxf->mcast_handle_q) {
- mchandle = (struct bna_mcam_handle *)qe;
+ list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe)
if (mchandle->handle == handle)
return mchandle;
- }
return NULL;
}
@@ -515,7 +416,6 @@ bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
ret = 1;
}
list_del(&mchandle->qe);
- bfa_q_qe_init(&mchandle->qe);
bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
}
mac->handle = NULL;
@@ -527,26 +427,23 @@ static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
- struct list_head *qe;
int ret;
/* First delete multicast entries to maintain the count */
while (!list_empty(&rxf->mcast_pending_del_q)) {
- bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->mcast_pending_del_q,
+ struct bna_mac, qe);
ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
- bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
+ list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
if (ret)
return ret;
}
/* Add multicast entries */
if (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- list_add_tail(&mac->qe, &rxf->mcast_active_q);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->mcast_active_q);
bna_bfi_mcast_add_req(rxf, mac);
return 1;
}
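
Where the old code dequeued with bfa_q_deq() and then list_add_tail()ed onto the destination, list_move_tail() does the unlink and re-link in a single call. Sketch, with the same hypothetical `struct item` as above:

    /* move every entry from src to the tail of dst, oldest first */
    static void move_all(struct list_head *src, struct list_head *dst)
    {
            struct item *it;

            while (!list_empty(src)) {
                    it = list_first_entry(src, struct item, qe);
                    list_move_tail(&it->qe, dst);
            }
    }

When no per-entry work is needed, list_splice_tail_init() moves a whole list in O(1); the element-wise loop is kept in these hunks because each MAC also has a firmware request issued, or a return code checked, on the way across.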
@@ -566,7 +463,7 @@ bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
block_idx++;
vlan_pending_bitmask >>= 1;
}
- rxf->vlan_pending_bitmask &= ~(1 << block_idx);
+ rxf->vlan_pending_bitmask &= ~BIT(block_idx);
bna_bfi_rx_vlan_filter_set(rxf, block_idx);
return 1;
}
@@ -577,27 +474,24 @@ bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
- struct list_head *qe;
struct bna_mac *mac;
int ret;
/* Throw away delete pending mcast entries */
while (!list_empty(&rxf->mcast_pending_del_q)) {
- bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->mcast_pending_del_q,
+ struct bna_mac, qe);
ret = bna_rxf_mcast_del(rxf, mac, cleanup);
- bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
+ list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
if (ret)
return ret;
}
/* Move active mcast entries to pending_add_q */
while (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- bfa_q_qe_init(qe);
- list_add_tail(qe, &rxf->mcast_pending_add_q);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->mcast_active_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
if (bna_rxf_mcast_del(rxf, mac, cleanup))
return 1;
}
@@ -658,25 +552,6 @@ bna_rxf_cfg_apply(struct bna_rxf *rxf)
return 0;
}
-/* Only software reset */
-static int
-bna_rxf_fltr_clear(struct bna_rxf *rxf)
-{
- if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
- return 1;
-
- return 0;
-}
-
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
@@ -693,16 +568,13 @@ bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
struct bna_rx *rx = rxf->rx;
struct bna_rxp *rxp;
- struct list_head *qe;
int offset = 0;
rxf->rit_size = rit_size;
- list_for_each(qe, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe;
+ list_for_each_entry(rxp, &rx->rxp_q, qe) {
rxf->rit[offset] = rxp->cq.ccb->id;
offset++;
}
-
}
void
@@ -760,9 +632,6 @@ bna_rxf_init(struct bna_rxf *rxf,
INIT_LIST_HEAD(&rxf->mcast_active_q);
INIT_LIST_HEAD(&rxf->mcast_handle_q);
- if (q_config->paused)
- rxf->flags |= BNA_RXF_F_PAUSED;
-
rxf->rit = (u8 *)
res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
bna_rit_init(rxf, q_config->num_paths);
@@ -795,22 +664,21 @@ bna_rxf_uninit(struct bna_rxf *rxf)
rxf->ucast_active_set = 0;
while (!list_empty(&rxf->ucast_pending_add_q)) {
- bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
+ mac = list_first_entry(&rxf->ucast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna));
}
if (rxf->ucast_pending_mac) {
- bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
- bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
- rxf->ucast_pending_mac);
+ list_add_tail(&rxf->ucast_pending_mac->qe,
+ bna_ucam_mod_free_q(rxf->rx->bna));
rxf->ucast_pending_mac = NULL;
}
while (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
}
rxf->rxmode_pending = 0;
@@ -823,8 +691,6 @@ bna_rxf_uninit(struct bna_rxf *rxf)
rxf->rss_pending = 0;
rxf->vlan_strip_pending = false;
- rxf->flags = 0;
-
rxf->rx = NULL;
}
@@ -863,8 +729,7 @@ bna_rxf_fail(struct bna_rxf *rxf)
}
enum bna_cb_status
-bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac)
{
struct bna_rxf *rxf = &rx->rxf;
@@ -873,12 +738,11 @@ bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
if (rxf->ucast_pending_mac == NULL)
return BNA_CB_UCAST_CAM_FULL;
- bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
}
- memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
+ ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
rxf->ucast_pending_set = 1;
- rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbfn = NULL;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
@@ -904,8 +768,7 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
if (mac == NULL)
return BNA_CB_MCAST_LIST_FULL;
- bfa_q_qe_init(&mac->qe);
- memcpy(mac->addr, addr, ETH_ALEN);
+ ether_addr_copy(mac->addr, addr);
list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
rxf->cam_fltr_cbfn = cbfn;
@@ -917,35 +780,31 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
}
enum bna_cb_status
-bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist)
{
struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
- struct list_head *qe;
u8 *mcaddr;
struct bna_mac *mac, *del_mac;
int i;
/* Purge the pending_add_q */
while (!list_empty(&rxf->ucast_pending_add_q)) {
- bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ mac = list_first_entry(&rxf->ucast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &ucam_mod->free_q);
}
/* Schedule active_q entries for deletion */
while (!list_empty(&rxf->ucast_active_q)) {
- bfa_q_deq(&rxf->ucast_active_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
-
+ mac = list_first_entry(&rxf->ucast_active_q,
+ struct bna_mac, qe);
del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
- memcpy(del_mac, mac, sizeof(*del_mac));
+ ether_addr_copy(del_mac->addr, mac->addr);
+ del_mac->handle = mac->handle;
list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
- bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ list_move_tail(&mac->qe, &ucam_mod->free_q);
}
/* Allocate nodes */
@@ -954,69 +813,57 @@ bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
if (mac == NULL)
goto err_return;
- bfa_q_qe_init(&mac->qe);
- memcpy(mac->addr, mcaddr, ETH_ALEN);
+ ether_addr_copy(mac->addr, mcaddr);
list_add_tail(&mac->qe, &list_head);
mcaddr += ETH_ALEN;
}
/* Add the new entries */
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
}
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
err_return:
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &ucam_mod->free_q);
}
return BNA_CB_UCAST_CAM_FULL;
}
enum bna_cb_status
-bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist)
{
struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
- struct list_head *qe;
u8 *mcaddr;
struct bna_mac *mac, *del_mac;
int i;
/* Purge the pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
- bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &mcam_mod->free_q);
}
/* Schedule active_q entries for deletion */
while (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
-
+ mac = list_first_entry(&rxf->mcast_active_q,
+ struct bna_mac, qe);
del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
-
- memcpy(del_mac, mac, sizeof(*del_mac));
+ ether_addr_copy(del_mac->addr, mac->addr);
+ del_mac->handle = mac->handle;
list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
mac->handle = NULL;
- bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ list_move_tail(&mac->qe, &mcam_mod->free_q);
}
/* Allocate nodes */
@@ -1025,8 +872,7 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
if (mac == NULL)
goto err_return;
- bfa_q_qe_init(&mac->qe);
- memcpy(mac->addr, mcaddr, ETH_ALEN);
+ ether_addr_copy(mac->addr, mcaddr);
list_add_tail(&mac->qe, &list_head);
mcaddr += ETH_ALEN;
@@ -1034,70 +880,52 @@ bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
/* Add the new entries */
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
}
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
return BNA_CB_SUCCESS;
err_return:
while (!list_empty(&list_head)) {
- bfa_q_deq(&list_head, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
+ mac = list_first_entry(&list_head, struct bna_mac, qe);
+ list_move_tail(&mac->qe, &mcam_mod->free_q);
}
return BNA_CB_MCAST_LIST_FULL;
}
void
-bna_rx_mcast_delall(struct bna_rx *rx,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+bna_rx_mcast_delall(struct bna_rx *rx)
{
struct bna_rxf *rxf = &rx->rxf;
- struct list_head *qe;
struct bna_mac *mac, *del_mac;
int need_hw_config = 0;
/* Purge all entries from pending_add_q */
while (!list_empty(&rxf->mcast_pending_add_q)) {
- bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
- bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ mac = list_first_entry(&rxf->mcast_pending_add_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
}
/* Schedule all entries in active_q for deletion */
while (!list_empty(&rxf->mcast_active_q)) {
- bfa_q_deq(&rxf->mcast_active_q, &qe);
- mac = (struct bna_mac *)qe;
- bfa_q_qe_init(&mac->qe);
-
+ mac = list_first_entry(&rxf->mcast_active_q,
+ struct bna_mac, qe);
+ list_del(&mac->qe);
del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
-
memcpy(del_mac, mac, sizeof(*del_mac));
list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
mac->handle = NULL;
- bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
+ list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
need_hw_config = 1;
}
- if (need_hw_config) {
- rxf->cam_fltr_cbfn = cbfn;
- rxf->cam_fltr_cbarg = rx->bna->bnad;
+ if (need_hw_config)
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
- return;
- }
-
- if (cbfn)
- (*cbfn)(rx->bna->bnad, rx);
}
void
@@ -1105,12 +933,12 @@ bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
- int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+ int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
rxf->vlan_filter_table[index] |= bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
- rxf->vlan_pending_bitmask |= (1 << group_id);
+ rxf->vlan_pending_bitmask |= BIT(group_id);
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
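
BIT(n) is the kernel's 1UL << n shorthand, so the hunks above are behaviour-preserving. A worked example, assuming 32-bit filter words (i.e. BFI_VLAN_WORD_SHIFT == 5 and BFI_VLAN_WORD_MASK == 0x1f, which this sketch does not verify against the headers):

	/* vlan_id = 70:
	 *   index = 70 >> 5        = 2        (third 32-bit word)
	 *   bit   = BIT(70 & 0x1f) = BIT(6)   = 0x40
	 * so bit 6 of vlan_filter_table[2] tracks VLAN 70.
	 */
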
@@ -1120,12 +948,12 @@ bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
- int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
+ int bit = BIT((vlan_id & BFI_VLAN_WORD_MASK));
int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
rxf->vlan_filter_table[index] &= ~bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
- rxf->vlan_pending_bitmask |= (1 << group_id);
+ rxf->vlan_pending_bitmask |= BIT(group_id);
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
}
@@ -1134,23 +962,21 @@ static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
- struct list_head *qe;
/* Delete MAC addresses previously added */
if (!list_empty(&rxf->ucast_pending_del_q)) {
- bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->ucast_pending_del_q,
+ struct bna_mac, qe);
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
+ list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna));
return 1;
}
/* Set default unicast MAC */
if (rxf->ucast_pending_set) {
rxf->ucast_pending_set = 0;
- memcpy(rxf->ucast_active_mac.addr,
- rxf->ucast_pending_mac->addr, ETH_ALEN);
+ ether_addr_copy(rxf->ucast_active_mac.addr,
+ rxf->ucast_pending_mac->addr);
rxf->ucast_active_set = 1;
bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
BFI_ENET_H2I_MAC_UCAST_SET_REQ);
@@ -1159,9 +985,8 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
/* Add additional MAC entries */
if (!list_empty(&rxf->ucast_pending_add_q)) {
- bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->ucast_pending_add_q,
+ struct bna_mac, qe);
list_add_tail(&mac->qe, &rxf->ucast_active_q);
bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
return 1;
@@ -1173,33 +998,30 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
- struct list_head *qe;
struct bna_mac *mac;
/* Throw away delete pending ucast entries */
while (!list_empty(&rxf->ucast_pending_del_q)) {
- bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
- bfa_q_qe_init(qe);
- mac = (struct bna_mac *)qe;
+ mac = list_first_entry(&rxf->ucast_pending_del_q,
+ struct bna_mac, qe);
if (cleanup == BNA_SOFT_CLEANUP)
- bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
- mac);
+ list_move_tail(&mac->qe,
+ bna_ucam_mod_del_q(rxf->rx->bna));
else {
bna_bfi_ucast_req(rxf, mac,
- BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
- bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
- mac);
+ BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
+ list_move_tail(&mac->qe,
+ bna_ucam_mod_del_q(rxf->rx->bna));
return 1;
}
}
/* Move active ucast entries to pending_add_q */
while (!list_empty(&rxf->ucast_active_q)) {
- bfa_q_deq(&rxf->ucast_active_q, &qe);
- bfa_q_qe_init(qe);
- list_add_tail(qe, &rxf->ucast_pending_add_q);
+ mac = list_first_entry(&rxf->ucast_active_q,
+ struct bna_mac, qe);
+ list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
if (cleanup == BNA_HARD_CLEANUP) {
- mac = (struct bna_mac *)qe;
bna_bfi_ucast_req(rxf, mac,
BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
return 1;
@@ -1654,14 +1476,11 @@ static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp;
- struct list_head *qe_rxp;
int is_regular = (rx->type == BNA_RX_T_REGULAR);
/* Start IB */
- list_for_each(qe_rxp, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe_rxp;
+ list_for_each_entry(rxp, &rx->rxp_q, qe)
bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
- }
bna_ethport_cb_rx_started(&rx->bna->ethport);
}
@@ -1804,7 +1623,6 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
struct bna_rxp *rxp = NULL;
struct bna_rxq *q0 = NULL, *q1 = NULL;
- struct list_head *rxp_qe;
int i;
bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
@@ -1814,11 +1632,9 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
cfg_req->num_queue_sets = rx->num_paths;
- for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
- i < rx->num_paths;
- i++, rxp_qe = bfa_q_next(rxp_qe)) {
- rxp = (struct bna_rxp *)rxp_qe;
-
+ for (i = 0; i < rx->num_paths; i++) {
+ rxp = rxp ? list_next_entry(rxp, qe)
+ : list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
GET_RXQS(rxp, q0, q1);
switch (rxp->type) {
case BNA_RXP_SLR:
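
The rewritten loop above walks the first num_paths entries without a separate cursor variable: on the first pass rxp is NULL, so list_first_entry() seeds it, and each later pass advances with list_next_entry(). The shape of the idiom, assuming the list holds at least n entries (names hypothetical):

	struct item *it = NULL;
	int i;

	for (i = 0; i < n; i++) {
		it = it ? list_next_entry(it, qe)
			: list_first_entry(&some_list, struct item, qe);
		/* use it */
	}
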
@@ -1921,13 +1737,10 @@ static void
bna_rx_enet_stop(struct bna_rx *rx)
{
struct bna_rxp *rxp;
- struct list_head *qe_rxp;
/* Stop IB */
- list_for_each(qe_rxp, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe_rxp;
+ list_for_each_entry(rxp, &rx->rxp_q, qe)
bna_ib_stop(rx->bna, &rxp->cq.ib);
- }
bna_bfi_rx_enet_stop(rx);
}
@@ -1957,12 +1770,10 @@ static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
struct bna_rxq *rxq = NULL;
- struct list_head *qe = NULL;
- bfa_q_deq(&rx_mod->rxq_free_q, &qe);
+ rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe);
+ list_del(&rxq->qe);
rx_mod->rxq_free_count--;
- rxq = (struct bna_rxq *)qe;
- bfa_q_qe_init(&rxq->qe);
return rxq;
}
@@ -1970,7 +1781,6 @@ bna_rxq_get(struct bna_rx_mod *rx_mod)
static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
- bfa_q_qe_init(&rxq->qe);
list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
@@ -1978,13 +1788,11 @@ bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
- struct list_head *qe = NULL;
struct bna_rxp *rxp = NULL;
- bfa_q_deq(&rx_mod->rxp_free_q, &qe);
+ rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe);
+ list_del(&rxp->qe);
rx_mod->rxp_free_count--;
- rxp = (struct bna_rxp *)qe;
- bfa_q_qe_init(&rxp->qe);
return rxp;
}
@@ -1992,7 +1800,6 @@ bna_rxp_get(struct bna_rx_mod *rx_mod)
static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
- bfa_q_qe_init(&rxp->qe);
list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
@@ -2000,18 +1807,16 @@ bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
- struct list_head *qe = NULL;
struct bna_rx *rx = NULL;
- if (type == BNA_RX_T_REGULAR) {
- bfa_q_deq(&rx_mod->rx_free_q, &qe);
- } else
- bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
+ BUG_ON(list_empty(&rx_mod->rx_free_q));
+ if (type == BNA_RX_T_REGULAR)
+ rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
+ else
+ rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
rx_mod->rx_free_count--;
- rx = (struct bna_rx *)qe;
- bfa_q_qe_init(&rx->qe);
- list_add_tail(&rx->qe, &rx_mod->rx_active_q);
+ list_move_tail(&rx->qe, &rx_mod->rx_active_q);
rx->type = type;
return rx;
@@ -2020,32 +1825,13 @@ bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
- struct list_head *prev_qe = NULL;
struct list_head *qe;
- bfa_q_qe_init(&rx->qe);
-
- list_for_each(qe, &rx_mod->rx_free_q) {
+ list_for_each_prev(qe, &rx_mod->rx_free_q)
if (((struct bna_rx *)qe)->rid < rx->rid)
- prev_qe = qe;
- else
break;
- }
-
- if (prev_qe == NULL) {
- /* This is the first entry */
- bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
- } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
- /* This is the last entry */
- list_add_tail(&rx->qe, &rx_mod->rx_free_q);
- } else {
- /* Somewhere in the middle */
- bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
- bfa_q_prev(&rx->qe) = prev_qe;
- bfa_q_next(prev_qe) = &rx->qe;
- bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
- }
+ list_add(&rx->qe, qe);
rx_mod->rx_free_count++;
}
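
The replacement above folds the three-way head/tail/middle bookkeeping into one reverse scan: list_for_each_prev() stops at the last node whose rid is smaller (or at the list head itself if none is), and list_add(new, pos) links the new node immediately after pos, which covers all three original cases. A sketch using container_of() instead of the cast, so it does not depend on qe being the first member:

	struct node {
		struct list_head qe;
		int rid;
	};

	/* Insert 'new' into a list kept sorted by ascending ->rid. */
	static void insert_sorted(struct list_head *head, struct node *new)
	{
		struct list_head *pos;

		list_for_each_prev(pos, head)
			if (container_of(pos, struct node, qe)->rid < new->rid)
				break;
		list_add(&new->qe, pos);	/* after pos, or at head */
	}
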
@@ -2199,24 +1985,20 @@ void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
- struct list_head *qe;
rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
if (type == BNA_RX_T_LOOPBACK)
rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
+ list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
if (rx->type == type)
bna_rx_start(rx);
- }
}
void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
- struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
@@ -2225,13 +2007,11 @@ bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
+ list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
if (rx->type == type) {
bfa_wc_up(&rx_mod->rx_stop_wc);
bna_rx_stop(rx);
}
- }
bfa_wc_wait(&rx_mod->rx_stop_wc);
}
@@ -2240,15 +2020,12 @@ void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
struct bna_rx *rx;
- struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &rx_mod->rx_active_q) {
- rx = (struct bna_rx *)qe;
+ list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
bna_rx_fail(rx);
- }
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
@@ -2282,7 +2059,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
rx_ptr = &rx_mod->rx[index];
- bfa_q_qe_init(&rx_ptr->qe);
INIT_LIST_HEAD(&rx_ptr->rxp_q);
rx_ptr->bna = NULL;
rx_ptr->rid = index;
@@ -2296,7 +2072,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
/* build RX-path queue */
for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
rxp_ptr = &rx_mod->rxp[index];
- bfa_q_qe_init(&rxp_ptr->qe);
list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
@@ -2304,7 +2079,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
/* build RXQ queue */
for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
rxq_ptr = &rx_mod->rxq[index];
- bfa_q_qe_init(&rxq_ptr->qe);
list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
@@ -2313,21 +2087,6 @@ void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &rx_mod->rx_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &rx_mod->rxp_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &rx_mod->rxq_free_q)
- i++;
-
rx_mod->bna = NULL;
}
@@ -2337,7 +2096,6 @@ bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
struct bna_rxp *rxp = NULL;
struct bna_rxq *q0 = NULL, *q1 = NULL;
- struct list_head *rxp_qe;
int i;
bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
@@ -2345,10 +2103,8 @@ bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
rx->hw_id = cfg_rsp->hw_id;
- for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
- i < rx->num_paths;
- i++, rxp_qe = bfa_q_next(rxp_qe)) {
- rxp = (struct bna_rxp *)rxp_qe;
+ for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
+ i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) {
GET_RXQS(rxp, q0, q1);
/* Setup doorbells */
@@ -2396,20 +2152,19 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
dq_depth = q_cfg->q0_depth;
hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
- cq_depth = dq_depth + hq_depth;
- BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
cq_size = cq_depth * BFI_CQ_WI_SIZE;
cq_size = ALIGN(cq_size, PAGE_SIZE);
cpage_count = SIZE_TO_PAGES(cq_size);
- BNA_TO_POWER_OF_2_HIGH(dq_depth);
+ dq_depth = roundup_pow_of_two(dq_depth);
dq_size = dq_depth * BFI_RXQ_WI_SIZE;
dq_size = ALIGN(dq_size, PAGE_SIZE);
dpage_count = SIZE_TO_PAGES(dq_size);
if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
- BNA_TO_POWER_OF_2_HIGH(hq_depth);
+ hq_depth = roundup_pow_of_two(hq_depth);
hq_size = hq_depth * BFI_RXQ_WI_SIZE;
hq_size = ALIGN(hq_size, PAGE_SIZE);
hpage_count = SIZE_TO_PAGES(hq_size);
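
roundup_pow_of_two() (linux/log2.h) returns the next power of two at or above its argument, replacing the driver macro that modified its argument in place. For instance:

	/* roundup_pow_of_two(1000) == 1024
	 * roundup_pow_of_two(1024) == 1024
	 * so cq_depth above becomes the smallest power-of-two ring
	 * depth that can hold dq_depth + hq_depth entries.
	 */
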
@@ -2620,7 +2375,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
if (intr_info->intr_type == BNA_INTR_T_MSIX)
rxp->cq.ib.intr_vector = rxp->vector;
else
- rxp->cq.ib.intr_vector = (1 << rxp->vector);
+ rxp->cq.ib.intr_vector = BIT(rxp->vector);
rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
@@ -2691,7 +2446,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
/* if multi-buffer is enabled, the sum of q0_depth
* and q1_depth need not be a power of 2
*/
- BNA_TO_POWER_OF_2_HIGH(cq_depth);
+ cq_depth = roundup_pow_of_two(cq_depth);
rxp->cq.ccb->q_depth = cq_depth;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->rcb[0] = q0->rcb;
@@ -2725,7 +2480,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
- rx_mod->rid_mask |= (1 << rx->rid);
+ rx_mod->rid_mask |= BIT(rx->rid);
return rx;
}
@@ -2742,7 +2497,8 @@ bna_rx_destroy(struct bna_rx *rx)
bna_rxf_uninit(&rx->rxf);
while (!list_empty(&rx->rxp_q)) {
- bfa_q_deq(&rx->rxp_q, &rxp);
+ rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
+ list_del(&rxp->qe);
GET_RXQS(rxp, q0, q1);
if (rx->rcb_destroy_cbfn)
rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
@@ -2769,15 +2525,13 @@ bna_rx_destroy(struct bna_rx *rx)
bna_rxp_put(rx_mod, rxp);
}
- list_for_each(qe, &rx_mod->rx_active_q) {
+ list_for_each(qe, &rx_mod->rx_active_q)
if (qe == &rx->qe) {
list_del(&rx->qe);
- bfa_q_qe_init(&rx->qe);
break;
}
- }
- rx_mod->rid_mask &= ~(1 << rx->rid);
+ rx_mod->rid_mask &= ~BIT(rx->rid);
rx->bna = NULL;
rx->priv = NULL;
@@ -2844,8 +2598,7 @@ bna_rx_vlan_strip_disable(struct bna_rx *rx)
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
- enum bna_rxmode bitmask,
- void (*cbfn)(struct bnad *, struct bna_rx *))
+ enum bna_rxmode bitmask)
{
struct bna_rxf *rxf = &rx->rxf;
int need_hw_config = 0;
@@ -2900,11 +2653,10 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
/* Trigger h/w if needed */
if (need_hw_config) {
- rxf->cam_fltr_cbfn = cbfn;
+ rxf->cam_fltr_cbfn = NULL;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CONFIG);
- } else if (cbfn)
- (*cbfn)(rx->bna->bnad, rx);
+ }
return BNA_CB_SUCCESS;
@@ -2928,10 +2680,8 @@ void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
struct bna_rxp *rxp;
- struct list_head *qe;
- list_for_each(qe, &rx->rxp_q) {
- rxp = (struct bna_rxp *)qe;
+ list_for_each_entry(rxp, &rx->rxp_q, qe) {
rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
}
@@ -3024,16 +2774,6 @@ do { \
} \
} while (0)
-#define call_tx_prio_change_cbfn(tx) \
-do { \
- if ((tx)->prio_change_cbfn) { \
- void (*cbfn)(struct bnad *, struct bna_tx *); \
- cbfn = (tx)->prio_change_cbfn; \
- (tx)->prio_change_cbfn = NULL; \
- cbfn((tx)->bna->bnad, (tx)); \
- } \
-} while (0)
-
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);
@@ -3044,7 +2784,6 @@ enum bna_tx_event {
TX_E_FAIL = 3,
TX_E_STARTED = 4,
TX_E_STOPPED = 5,
- TX_E_PRIO_CHANGE = 6,
TX_E_CLEANUP_DONE = 7,
TX_E_BW_UPDATE = 8,
};
@@ -3085,10 +2824,6 @@ bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
/* No-op */
break;
- case TX_E_PRIO_CHANGE:
- call_tx_prio_change_cbfn(tx);
- break;
-
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3109,28 +2844,23 @@ bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_STOP:
- tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+ tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
break;
case TX_E_FAIL:
- tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
+ tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
case TX_E_STARTED:
- if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
- tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
- BNA_TX_F_BW_UPDATED);
+ if (tx->flags & BNA_TX_F_BW_UPDATED) {
+ tx->flags &= ~BNA_TX_F_BW_UPDATED;
bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
} else
bfa_fsm_set_state(tx, bna_tx_sm_started);
break;
- case TX_E_PRIO_CHANGE:
- tx->flags |= BNA_TX_F_PRIO_CHANGED;
- break;
-
case TX_E_BW_UPDATE:
tx->flags |= BNA_TX_F_BW_UPDATED;
break;
@@ -3144,11 +2874,9 @@ static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
struct bna_txq *txq;
- struct list_head *qe;
int is_regular = (tx->type == BNA_TX_T_REGULAR);
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe) {
txq->tcb->priority = txq->priority;
/* Start IB */
bna_ib_start(tx->bna, &txq->ib, is_regular);
@@ -3172,7 +2900,6 @@ bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
break;
@@ -3205,7 +2932,6 @@ bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
bna_tx_enet_stop(tx);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3225,7 +2951,6 @@ bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_FAIL:
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3256,7 +2981,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
case TX_E_FAIL:
bfa_fsm_set_state(tx, bna_tx_sm_failed);
- call_tx_prio_change_cbfn(tx);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
break;
@@ -3264,7 +2988,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3277,7 +3000,6 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
- call_tx_prio_change_cbfn(tx);
tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}
@@ -3293,7 +3015,6 @@ bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
bfa_fsm_set_state(tx, bna_tx_sm_failed);
break;
- case TX_E_PRIO_CHANGE:
case TX_E_BW_UPDATE:
/* No-op */
break;
@@ -3372,7 +3093,6 @@ bna_bfi_tx_enet_start(struct bna_tx *tx)
{
struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
struct bna_txq *txq = NULL;
- struct list_head *qe;
int i;
bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
@@ -3381,11 +3101,9 @@ bna_bfi_tx_enet_start(struct bna_tx *tx)
bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
cfg_req->num_queues = tx->num_txq;
- for (i = 0, qe = bfa_q_first(&tx->txq_q);
- i < tx->num_txq;
- i++, qe = bfa_q_next(qe)) {
- txq = (struct bna_txq *)qe;
-
+ for (i = 0; i < tx->num_txq; i++) {
+ txq = txq ? list_next_entry(txq, qe)
+ : list_first_entry(&tx->txq_q, struct bna_txq, qe);
bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
cfg_req->q_cfg[i].q.priority = txq->priority;
@@ -3437,13 +3155,10 @@ static void
bna_tx_enet_stop(struct bna_tx *tx)
{
struct bna_txq *txq;
- struct list_head *qe;
/* Stop IB */
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe)
bna_ib_stop(tx->bna, &txq->ib);
- }
bna_bfi_tx_enet_stop(tx);
}
@@ -3487,18 +3202,15 @@ bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
- struct list_head *qe = NULL;
struct bna_tx *tx = NULL;
if (list_empty(&tx_mod->tx_free_q))
return NULL;
- if (type == BNA_TX_T_REGULAR) {
- bfa_q_deq(&tx_mod->tx_free_q, &qe);
- } else {
- bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
- }
- tx = (struct bna_tx *)qe;
- bfa_q_qe_init(&tx->qe);
+ if (type == BNA_TX_T_REGULAR)
+ tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
+ else
+ tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
+ list_del(&tx->qe);
tx->type = type;
return tx;
@@ -3509,21 +3221,18 @@ bna_tx_free(struct bna_tx *tx)
{
struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
struct bna_txq *txq;
- struct list_head *prev_qe;
struct list_head *qe;
while (!list_empty(&tx->txq_q)) {
- bfa_q_deq(&tx->txq_q, &txq);
- bfa_q_qe_init(&txq->qe);
+ txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
txq->tcb = NULL;
txq->tx = NULL;
- list_add_tail(&txq->qe, &tx_mod->txq_free_q);
+ list_move_tail(&txq->qe, &tx_mod->txq_free_q);
}
list_for_each(qe, &tx_mod->tx_active_q) {
if (qe == &tx->qe) {
list_del(&tx->qe);
- bfa_q_qe_init(&tx->qe);
break;
}
}
@@ -3531,28 +3240,11 @@ bna_tx_free(struct bna_tx *tx)
tx->bna = NULL;
tx->priv = NULL;
- prev_qe = NULL;
- list_for_each(qe, &tx_mod->tx_free_q) {
+ list_for_each_prev(qe, &tx_mod->tx_free_q)
if (((struct bna_tx *)qe)->rid < tx->rid)
- prev_qe = qe;
- else {
break;
- }
- }
- if (prev_qe == NULL) {
- /* This is the first entry */
- bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
- } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
- /* This is the last entry */
- list_add_tail(&tx->qe, &tx_mod->tx_free_q);
- } else {
- /* Somewhere in the middle */
- bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
- bfa_q_prev(&tx->qe) = prev_qe;
- bfa_q_next(prev_qe) = &tx->qe;
- bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
- }
+ list_add(&tx->qe, qe);
}
static void
@@ -3585,7 +3277,6 @@ bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
struct bna_txq *txq = NULL;
- struct list_head *qe;
int i;
bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
@@ -3593,10 +3284,8 @@ bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
tx->hw_id = cfg_rsp->hw_id;
- for (i = 0, qe = bfa_q_first(&tx->txq_q);
- i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
- txq = (struct bna_txq *)qe;
-
+ for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
+ i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) {
/* Setup doorbells */
txq->tcb->i_dbell->doorbell_addr =
tx->bna->pcidev.pci_bar_kva
@@ -3624,12 +3313,9 @@ void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
struct bna_tx *tx;
- struct list_head *qe;
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
- }
}
void
@@ -3689,7 +3375,6 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
struct bna_tx_mod *tx_mod = &bna->tx_mod;
struct bna_tx *tx;
struct bna_txq *txq;
- struct list_head *qe;
int page_count;
int i;
@@ -3719,9 +3404,8 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
if (list_empty(&tx_mod->txq_free_q))
goto err_return;
- bfa_q_deq(&tx_mod->txq_free_q, &txq);
- bfa_q_qe_init(&txq->qe);
- list_add_tail(&txq->qe, &tx->txq_q);
+ txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe);
+ list_move_tail(&txq->qe, &tx->txq_q);
txq->tx = tx;
}
@@ -3760,8 +3444,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
/* TxQ */
i = 0;
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe) {
txq->tcb = (struct bna_tcb *)
res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
txq->tx_packets = 0;
@@ -3779,7 +3462,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
intr_info->idl[0].vector :
intr_info->idl[i].vector;
if (intr_info->intr_type == BNA_INTR_T_INTX)
- txq->ib.intr_vector = (1 << txq->ib.intr_vector);
+ txq->ib.intr_vector = BIT(txq->ib.intr_vector);
txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
@@ -3821,7 +3504,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
- tx_mod->rid_mask |= (1 << tx->rid);
+ tx_mod->rid_mask |= BIT(tx->rid);
return tx;
@@ -3834,15 +3517,12 @@ void
bna_tx_destroy(struct bna_tx *tx)
{
struct bna_txq *txq;
- struct list_head *qe;
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe)
if (tx->tcb_destroy_cbfn)
(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
- }
- tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
+ tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
bna_tx_free(tx);
}
@@ -3920,9 +3600,7 @@ bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
tx_mod->tx[i].rid = i;
- bfa_q_qe_init(&tx_mod->tx[i].qe);
list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
- bfa_q_qe_init(&tx_mod->txq[i].qe);
list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
}
@@ -3935,17 +3613,6 @@ bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
- struct list_head *qe;
- int i;
-
- i = 0;
- list_for_each(qe, &tx_mod->tx_free_q)
- i++;
-
- i = 0;
- list_for_each(qe, &tx_mod->txq_free_q)
- i++;
-
tx_mod->bna = NULL;
}
@@ -3953,24 +3620,20 @@ void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
- struct list_head *qe;
tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
if (type == BNA_TX_T_LOOPBACK)
tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
if (tx->type == type)
bna_tx_start(tx);
- }
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
- struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
@@ -3979,13 +3642,11 @@ bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
if (tx->type == type) {
bfa_wc_up(&tx_mod->tx_stop_wc);
bna_tx_stop(tx);
}
- }
bfa_wc_wait(&tx_mod->tx_stop_wc);
}
@@ -3994,25 +3655,19 @@ void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
struct bna_tx *tx;
- struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
- list_for_each(qe, &tx_mod->tx_active_q) {
- tx = (struct bna_tx *)qe;
+ list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
bna_tx_fail(tx);
- }
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
struct bna_txq *txq;
- struct list_head *qe;
- list_for_each(qe, &tx->txq_q) {
- txq = (struct bna_txq *)qe;
+ list_for_each_entry(txq, &tx->txq_q, qe)
bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
- }
}
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index d0a7a566f5d6..e0e797f2ea14 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -135,7 +135,6 @@ enum bna_tx_type {
enum bna_tx_flags {
BNA_TX_F_ENET_STARTED = 1,
BNA_TX_F_ENABLED = 2,
- BNA_TX_F_PRIO_CHANGED = 4,
BNA_TX_F_BW_UPDATED = 8,
};
@@ -182,17 +181,11 @@ enum bna_rx_mod_flags {
BNA_RX_MOD_F_ENET_LOOPBACK = 2,
};
-enum bna_rxf_flags {
- BNA_RXF_F_PAUSED = 1,
-};
-
enum bna_rxf_event {
RXF_E_START = 1,
RXF_E_STOP = 2,
RXF_E_FAIL = 3,
RXF_E_CONFIG = 4,
- RXF_E_PAUSE = 5,
- RXF_E_RESUME = 6,
RXF_E_FW_RESP = 7,
};
@@ -362,9 +355,6 @@ struct bna_enet {
void (*stop_cbfn)(void *);
void *stop_cbarg;
- /* Callback for bna_enet_pause_config() */
- void (*pause_cbfn)(struct bnad *);
-
/* Callback for bna_enet_mtu_set() */
void (*mtu_cbfn)(struct bnad *);
@@ -498,9 +488,6 @@ struct bna_tx {
void (*stop_cbfn)(void *arg, struct bna_tx *tx);
void *stop_cbarg;
- /* callback for bna_tx_prio_set() */
- void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx);
-
struct bfa_msgq_cmd_entry msgq_cmd;
union {
struct bfi_enet_tx_cfg_req cfg_req;
@@ -676,7 +663,6 @@ struct bna_rx_config {
enum bna_rx_type rx_type;
int num_paths;
enum bna_rxp_type rxp_type;
- int paused;
int coalescing_timeo;
/*
* Small/Large (or Header/Data) buffer size to be configured
@@ -721,7 +707,6 @@ struct bna_rxp {
/* RxF structure (hardware Rx Function) */
struct bna_rxf {
bfa_fsm_t fsm;
- enum bna_rxf_flags flags;
struct bfa_msgq_cmd_entry msgq_cmd;
union {
@@ -742,10 +727,6 @@ struct bna_rxf {
void (*stop_cbfn) (struct bna_rx *rx);
struct bna_rx *stop_cbarg;
- /* callback for bna_rx_receive_pause() / bna_rx_receive_resume() */
- void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx);
- struct bnad *oper_state_cbarg;
-
/**
* callback for:
* bna_rxf_ucast_set()
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index caae6cb2bc1a..6be31ae7b5c9 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -57,7 +57,8 @@ static u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
-static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+static const u8 bnad_bcast_addr[] __aligned(2) =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/*
* Local MACROS
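
ether_addr_copy() is documented to require both buffers to be 16-bit aligned, since it may copy in u16/u32 words on some architectures; that is what the new __aligned(2) on the broadcast table guarantees. A minimal sketch:

	#include <linux/etherdevice.h>

	static const u8 bcast[ETH_ALEN] __aligned(2) =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	static void copy_bcast(u8 *dst /* must be 2-byte aligned too */)
	{
		ether_addr_copy(dst, bcast);
	}
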
@@ -724,7 +725,6 @@ next:
cmpl->valid = 0;
BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
}
- cmpl = &cq[ccb->producer_index];
}
napi_gro_flush(&rx_ctrl->napi, false);
@@ -875,9 +875,9 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
{
struct net_device *netdev = bnad->netdev;
- memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+ ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
if (is_zero_ether_addr(netdev->dev_addr))
- memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
}
/* Control Path Handlers */
@@ -946,8 +946,7 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
if (link_up) {
if (!netif_carrier_ok(bnad->netdev)) {
uint tx_id, tcb_id;
- printk(KERN_WARNING "bna: %s link up\n",
- bnad->netdev->name);
+ netdev_info(bnad->netdev, "link up\n");
netif_carrier_on(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
@@ -966,10 +965,6 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
/*
* Force an immediate
* Transmit Schedule */
- printk(KERN_INFO "bna: %s %d "
- "TXQ_STARTED\n",
- bnad->netdev->name,
- txq_id);
netif_wake_subqueue(
bnad->netdev,
txq_id);
@@ -987,8 +982,7 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
}
} else {
if (netif_carrier_ok(bnad->netdev)) {
- printk(KERN_WARNING "bna: %s link down\n",
- bnad->netdev->name);
+ netdev_info(bnad->netdev, "link down\n");
netif_carrier_off(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
}
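
netdev_info()/netdev_warn() prepend the driver and interface name automatically, which is why the hand-rolled "bna: %s ..." prefixes and netdev->name arguments disappear above. Roughly (exact prefix format illustrative):

	netdev_info(bnad->netdev, "link up\n");
	/* -> "bna 0000:0b:00.0 eth0: link up", versus the old
	 *    printk(KERN_WARNING "bna: %s link up\n", netdev->name) */
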
@@ -1058,8 +1052,6 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
txq_id = tcb->id;
clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
netif_stop_subqueue(bnad->netdev, txq_id);
- printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
- bnad->netdev->name, txq_id);
}
}
@@ -1082,8 +1074,6 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
BUG_ON(*(tcb->hw_consumer_index) != 0);
if (netif_carrier_ok(bnad->netdev)) {
- printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
- bnad->netdev->name, txq_id);
netif_wake_subqueue(bnad->netdev, txq_id);
BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
}
@@ -1094,8 +1084,8 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
* get a 0 MAC address. We try to get the MAC address
* again here.
*/
- if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
- bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
+ if (is_zero_ether_addr(bnad->perm_addr)) {
+ bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
bnad_set_netdev_perm_addr(bnad);
}
}
@@ -1703,7 +1693,7 @@ bnad_ioc_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1714,7 +1704,7 @@ bnad_ioc_hb_check(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1725,7 +1715,7 @@ bnad_iocpf_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1736,7 +1726,7 @@ bnad_iocpf_sem_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
+ bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -1862,8 +1852,7 @@ bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
struct netdev_hw_addr *mc_addr;
netdev_for_each_mc_addr(mc_addr, netdev) {
- memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
- ETH_ALEN);
+ ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
i++;
}
}
@@ -2137,7 +2126,7 @@ bnad_reinit_rx(struct bnad *bnad)
current_err = bnad_setup_rx(bnad, rx_id);
if (current_err && !err) {
err = current_err;
- pr_err("RXQ:%u setup failed\n", rx_id);
+ netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
}
}
@@ -2349,7 +2338,7 @@ bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
if (!bnad->rx_info[0].rx)
return 0;
- ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
+ ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
if (ret != BNA_CB_SUCCESS)
return -EADDRNOTAVAIL;
@@ -2673,8 +2662,9 @@ bnad_enable_msix(struct bnad *bnad)
if (ret < 0) {
goto intx_mode;
} else if (ret < bnad->msix_num) {
- pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
- ret, bnad->msix_num);
+ dev_warn(&bnad->pcidev->dev,
+ "%d MSI-X vectors allocated < %d requested\n",
+ ret, bnad->msix_num);
spin_lock_irqsave(&bnad->bna_lock, flags);
/* ret = #of vectors that we got */
@@ -2696,7 +2686,8 @@ bnad_enable_msix(struct bnad *bnad)
return;
intx_mode:
- pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
+ dev_warn(&bnad->pcidev->dev,
+ "MSI-X enable failed - operating in INTx mode\n");
kfree(bnad->msix_table);
bnad->msix_table = NULL;
@@ -2754,7 +2745,7 @@ bnad_open(struct net_device *netdev)
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_enet_mtu_set(&bnad->bna.enet,
BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
- bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+ bna_enet_pause_config(&bnad->bna.enet, &pause_config);
bna_enet_enable(&bnad->bna.enet);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3128,7 +3119,7 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
int entry;
if (netdev_uc_empty(bnad->netdev)) {
- bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
return;
}
@@ -3141,13 +3132,11 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
entry = 0;
netdev_for_each_uc_addr(ha, netdev) {
- memcpy(&mac_list[entry * ETH_ALEN],
- &ha->addr[0], ETH_ALEN);
+ ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
entry++;
}
- ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
- mac_list, NULL);
+ ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
kfree(mac_list);
if (ret != BNA_CB_SUCCESS)
@@ -3158,7 +3147,7 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
/* ucast packets not in UCAM are routed to default function */
mode_default:
bnad->cfg_flags |= BNAD_CF_DEFAULT;
- bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+ bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
}
static void
@@ -3183,12 +3172,11 @@ bnad_set_rx_mcast_fltr(struct bnad *bnad)
if (mac_list == NULL)
goto mode_allmulti;
- memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+ ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
/* copy rest of the MCAST addresses */
bnad_netdev_mc_list_get(netdev, mac_list);
- ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
- mac_list, NULL);
+ ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
kfree(mac_list);
if (ret != BNA_CB_SUCCESS)
@@ -3198,7 +3186,7 @@ bnad_set_rx_mcast_fltr(struct bnad *bnad)
mode_allmulti:
bnad->cfg_flags |= BNAD_CF_ALLMULTI;
- bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+ bna_rx_mcast_delall(bnad->rx_info[0].rx);
}
void
@@ -3237,7 +3225,7 @@ bnad_set_rx_mode(struct net_device *netdev)
mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
BNA_RXMODE_ALLMULTI;
- bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
+ bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -3248,19 +3236,18 @@ bnad_set_rx_mode(struct net_device *netdev)
* in a non-blocking context.
*/
static int
-bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
+bnad_set_mac_address(struct net_device *netdev, void *addr)
{
int err;
struct bnad *bnad = netdev_priv(netdev);
- struct sockaddr *sa = (struct sockaddr *)mac_addr;
+ struct sockaddr *sa = (struct sockaddr *)addr;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
-
if (!err)
- memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, sa->sa_data);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3487,8 +3474,8 @@ bnad_init(struct bnad *bnad,
dev_err(&pdev->dev, "ioremap for bar0 failed\n");
return -ENOMEM;
}
- pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
- (unsigned long long) bnad->mmio_len);
+ dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
+ (unsigned long long) bnad->mmio_len);
spin_lock_irqsave(&bnad->bna_lock, flags);
if (!bnad_msix_disable)
@@ -3609,13 +3596,10 @@ bnad_pci_probe(struct pci_dev *pdev,
struct bfa_pcidev pcidev_info;
unsigned long flags;
- pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
- pdev, pcidev_id, PCI_FUNC(pdev->devfn));
-
mutex_lock(&bnad_fwimg_mutex);
if (!cna_get_firmware_buf(pdev)) {
mutex_unlock(&bnad_fwimg_mutex);
- pr_warn("Failed to load Firmware Image!\n");
+ dev_err(&pdev->dev, "failed to load firmware image!\n");
return -ENODEV;
}
mutex_unlock(&bnad_fwimg_mutex);
@@ -3708,8 +3692,7 @@ bnad_pci_probe(struct pci_dev *pdev,
*/
err = bnad_ioceth_enable(bnad);
if (err) {
- pr_err("BNA: Initialization failed err=%d\n",
- err);
+ dev_err(&pdev->dev, "initialization failed err=%d\n", err);
goto probe_success;
}
@@ -3742,7 +3725,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Get the burnt-in mac */
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
+ bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
bnad_set_netdev_perm_addr(bnad);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3751,7 +3734,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Finally, register with net_device layer */
err = register_netdev(netdev);
if (err) {
- pr_err("BNA : Registering with netdev failed\n");
+ dev_err(&pdev->dev, "registering net device failed\n");
goto probe_uninit;
}
set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
@@ -3803,7 +3786,6 @@ bnad_pci_remove(struct pci_dev *pdev)
if (!netdev)
return;
- pr_info("%s bnad_pci_remove\n", netdev->name);
bnad = netdev_priv(netdev);
bna = &bnad->bna;
@@ -3864,15 +3846,14 @@ bnad_module_init(void)
{
int err;
- pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n",
- BNAD_VERSION);
+ pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
+ BNAD_VERSION);
bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
err = pci_register_driver(&bnad_pci_driver);
if (err < 0) {
- pr_err("bna : PCI registration failed in module init "
- "(%d)\n", err);
+ pr_err("bna: PCI driver registration failed err=%d\n", err);
return err;
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 7ead6c23edb6..91fd82d9e9bc 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -344,7 +344,7 @@ struct bnad {
struct bnad_completion bnad_completions;
/* Burnt in MAC address */
- mac_t perm_addr;
+ u8 perm_addr[ETH_ALEN];
struct workqueue_struct *work_q;
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 72c89550417c..c0fd737e7486 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -76,8 +76,7 @@ bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
fw_debug->debug_buffer = NULL;
kfree(fw_debug);
fw_debug = NULL;
- pr_warn("bnad %s: Failed to collect fwtrc\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to collect fwtrc\n");
return -ENOMEM;
}
@@ -117,8 +116,7 @@ bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
fw_debug->debug_buffer = NULL;
kfree(fw_debug);
fw_debug = NULL;
- pr_warn("bna %s: Failed to collect fwsave\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to collect fwsave\n");
return -ENOMEM;
}
@@ -217,8 +215,7 @@ bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
drv_info->debug_buffer = NULL;
kfree(drv_info);
drv_info = NULL;
- pr_warn("bna %s: Failed to collect drvinfo\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to collect drvinfo\n");
return -ENOMEM;
}
@@ -321,27 +318,20 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
- /* Allocate memory to store the user space buf */
- kern_buf = kzalloc(nbytes, GFP_KERNEL);
- if (!kern_buf)
- return -ENOMEM;
-
- if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
- kfree(kern_buf);
- return -ENOMEM;
- }
+ /* Copy the user space buf */
+ kern_buf = memdup_user(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &len);
if (rc < 2) {
- pr_warn("bna %s: Failed to read user buffer\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to read user buffer\n");
kfree(kern_buf);
return -EINVAL;
}
kfree(kern_buf);
kfree(bnad->regdata);
- bnad->regdata = NULL;
bnad->reglen = 0;
bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
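
memdup_user() collapses the kzalloc()+copy_from_user() pair and returns an ERR_PTR on failure, which also corrects the old error code: a faulting copy now yields -EFAULT instead of -ENOMEM. The shape of the idiom:

	#include <linux/string.h>
	#include <linux/err.h>

	void *kern_buf = memdup_user(buf, nbytes);

	if (IS_ERR(kern_buf))
		return PTR_ERR(kern_buf);	/* -ENOMEM or -EFAULT */
	/* ... parse kern_buf ... */
	kfree(kern_buf);
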
@@ -355,8 +345,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
/* offset and len sanity check */
rc = bna_reg_offset_check(ioc, addr, len);
if (rc) {
- pr_warn("bna %s: Failed reg offset check\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed reg offset check\n");
kfree(bnad->regdata);
bnad->regdata = NULL;
bnad->reglen = 0;
@@ -388,20 +377,14 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
- /* Allocate memory to store the user space buf */
- kern_buf = kzalloc(nbytes, GFP_KERNEL);
- if (!kern_buf)
- return -ENOMEM;
-
- if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
- kfree(kern_buf);
- return -ENOMEM;
- }
+ /* Copy the user space buf */
+ kern_buf = memdup_user(buf, nbytes);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &val);
if (rc < 2) {
- pr_warn("bna %s: Failed to read user buffer\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed to read user buffer\n");
kfree(kern_buf);
return -EINVAL;
}
@@ -412,8 +395,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
/* offset and len sanity check */
rc = bna_reg_offset_check(ioc, addr, 1);
if (rc) {
- pr_warn("bna %s: Failed reg offset check\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev, "failed reg offset check\n");
return -EINVAL;
}
@@ -525,7 +507,8 @@ bnad_debugfs_init(struct bnad *bnad)
bna_debugfs_root = debugfs_create_dir("bna", NULL);
atomic_set(&bna_debugfs_port_count, 0);
if (!bna_debugfs_root) {
- pr_warn("BNA: debugfs root dir creation failed\n");
+ netdev_warn(bnad->netdev,
+ "debugfs root dir creation failed\n");
return;
}
}
@@ -536,8 +519,8 @@ bnad_debugfs_init(struct bnad *bnad)
bnad->port_debugfs_root =
debugfs_create_dir(name, bna_debugfs_root);
if (!bnad->port_debugfs_root) {
- pr_warn("bna pci_dev %s: root dir creation failed\n",
- pci_name(bnad->pcidev));
+ netdev_warn(bnad->netdev,
+ "debugfs root dir creation failed\n");
return;
}
@@ -552,9 +535,9 @@ bnad_debugfs_init(struct bnad *bnad)
bnad,
file->fops);
if (!bnad->bnad_dentry_files[i]) {
- pr_warn(
- "BNA pci_dev:%s: create %s entry failed\n",
- pci_name(bnad->pcidev), file->name);
+ netdev_warn(bnad->netdev,
+ "create %s entry failed\n",
+ file->name);
return;
}
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 12f344debd1c..2bdfc5dff4b1 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -445,13 +445,13 @@ bnad_set_ringparam(struct net_device *netdev,
if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
- !BNA_POWER_OF_2(ringparam->rx_pending)) {
+ !is_power_of_2(ringparam->rx_pending)) {
mutex_unlock(&bnad->conf_mutex);
return -EINVAL;
}
if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
- !BNA_POWER_OF_2(ringparam->tx_pending)) {
+ !is_power_of_2(ringparam->tx_pending)) {
mutex_unlock(&bnad->conf_mutex);
return -EINVAL;
}
@@ -533,7 +533,7 @@ bnad_set_pauseparam(struct net_device *netdev,
pause_config.rx_pause = pauseparam->rx_pause;
pause_config.tx_pause = pauseparam->tx_pause;
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+ bna_enet_pause_config(&bnad->bna.enet, &pause_config);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
mutex_unlock(&bnad->conf_mutex);
@@ -1080,7 +1080,7 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
if (ret) {
- pr_err("BNA: Can't locate firmware %s\n", eflash->data);
+ netdev_err(netdev, "can't load firmware %s\n", eflash->data);
goto out;
}
@@ -1093,7 +1093,7 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
bnad->id, (u8 *)fw->data, fw->size, 0,
bnad_cb_completion, &fcomp);
if (ret != BFA_STATUS_OK) {
- pr_warn("BNA: Flash update failed with err: %d\n", ret);
+ netdev_warn(netdev, "flash update failed with err=%d\n", ret);
ret = -EIO;
spin_unlock_irq(&bnad->bna_lock);
goto out;
@@ -1103,8 +1103,9 @@ bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
wait_for_completion(&fcomp.comp);
if (fcomp.comp_status != BFA_STATUS_OK) {
ret = -EIO;
- pr_warn("BNA: Firmware image update to flash failed with: %d\n",
- fcomp.comp_status);
+ netdev_warn(netdev,
+ "firmware image update failed with err=%d\n",
+ fcomp.comp_status);
}
out:
release_firmware(fw);
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 28e7d0ffeab1..75f8f1ac9fb7 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -42,66 +42,4 @@ extern char bfa_version[];
#define CNA_FW_FILE_CT2 "ct2fw-3.2.5.1.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
-#pragma pack(1)
-
-typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
-
-#pragma pack()
-
-#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
-#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
-#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
-
-/*
- * bfa_q_qe_init - to initialize a queue element
- */
-#define bfa_q_qe_init(_qe) { \
- bfa_q_next(_qe) = (struct list_head *) NULL; \
- bfa_q_prev(_qe) = (struct list_head *) NULL; \
-}
-
-/*
- * bfa_q_deq - dequeue an element from head of the queue
- */
-#define bfa_q_deq(_q, _qe) { \
- if (!list_empty(_q)) { \
- (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
- bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
- (struct list_head *) (_q); \
- bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
- bfa_q_qe_init(*((struct list_head **) _qe)); \
- } else { \
- *((struct list_head **)(_qe)) = NULL; \
- } \
-}
-
-/*
- * bfa_q_deq_tail - dequeue an element from tail of the queue
- */
-#define bfa_q_deq_tail(_q, _qe) { \
- if (!list_empty(_q)) { \
- *((struct list_head **) (_qe)) = bfa_q_prev(_q); \
- bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \
- (struct list_head *) (_q); \
- bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
- bfa_q_qe_init(*((struct list_head **) _qe)); \
- } else { \
- *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
- } \
-}
-
-/*
- * bfa_add_tail_head - enqueue an element at the head of queue
- */
-#define bfa_q_enq_head(_q, _qe) { \
- if (!(bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL)) \
- pr_err("Assertion failure: %s:%d: %d", \
- __FILE__, __LINE__, \
- (bfa_q_next(_qe) == NULL) && (bfa_q_prev(_qe) == NULL));\
- bfa_q_next(_qe) = bfa_q_next(_q); \
- bfa_q_prev(_qe) = (struct list_head *) (_q); \
- bfa_q_prev(bfa_q_next(_q)) = (struct list_head *) (_qe); \
- bfa_q_next(_q) = (struct list_head *) (_qe); \
-}
-
#endif /* __CNA_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index badea368bdc8..2e7fb97883dc 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -33,7 +33,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
u32 n;
if (request_firmware(&fw, fw_name, &pdev->dev)) {
- pr_alert("Can't locate firmware %s\n", fw_name);
+ dev_alert(&pdev->dev, "can't load firmware %s\n", fw_name);
goto error;
}
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index fc646a41d548..740d04fd2223 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -54,6 +54,8 @@
#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
#define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
+#define GEM_MTU_MIN_SIZE 68
+
/*
* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
@@ -785,7 +787,7 @@ static int gem_rx(struct macb *bp, int budget)
}
/* now everything is ready for receiving packet */
bp->rx_skbuff[entry] = NULL;
- len = MACB_BFEXT(RX_FRMLEN, ctrl);
+ len = ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
@@ -831,7 +833,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
struct macb_dma_desc *desc;
desc = macb_rx_desc(bp, last_frag);
- len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
+ len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
macb_rx_ring_wrap(first_frag),
@@ -1651,7 +1653,10 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
- config |= MACB_BIT(BIG); /* Receive oversized frames */
+ if (bp->caps & MACB_CAPS_JUMBO)
+ config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
+ else
+ config |= MACB_BIT(BIG); /* Receive oversized frames */
if (bp->dev->flags & IFF_PROMISC)
config |= MACB_BIT(CAF); /* Copy All Frames */
else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
@@ -1660,8 +1665,13 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BIT(NBC); /* No BroadCast */
config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
+ if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
+ gem_writel(bp, JML, bp->jumbo_max_len);
bp->speed = SPEED_10;
bp->duplex = DUPLEX_HALF;
+ bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
+ if (bp->caps & MACB_CAPS_JUMBO)
+ bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
macb_configure_dma(bp);
@@ -1865,6 +1875,26 @@ static int macb_close(struct net_device *dev)
return 0;
}
+static int macb_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct macb *bp = netdev_priv(dev);
+ u32 max_mtu;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ max_mtu = ETH_DATA_LEN;
+ if (bp->caps & MACB_CAPS_JUMBO)
+ max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+
+ if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
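A worked example of the bound computed above, assuming the ZynqMP default JML of 10240 (ETH_HLEN is 14 and ETH_FCS_LEN is 4 in <linux/if_ether.h>); illustrative only, not part of the patch:

    /* max_mtu = JML - ETH_HLEN - ETH_FCS_LEN
     *         = 10240 - 14 - 4 = 10222
     * so a jumbo-capable GEM accepts 68 <= new_mtu <= 10222, while
     * other MACs stay limited to ETH_DATA_LEN (1500).
     */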
static void gem_update_stats(struct macb *bp)
{
int i;
@@ -2141,7 +2171,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_get_stats = macb_get_stats,
.ndo_do_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = macb_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = macb_poll_controller,
@@ -2702,6 +2732,16 @@ static const struct macb_config emac_config = {
.init = at91ether_init,
};
+
+static const struct macb_config zynqmp_config = {
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .jumbo_max_len = 10240,
+};
+
static const struct macb_config zynq_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
MACB_CAPS_NO_GIGABIT_HALF,
@@ -2720,6 +2760,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
+ { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
{ /* sentinel */ }
};
@@ -2789,6 +2830,10 @@ static int macb_probe(struct platform_device *pdev)
bp->pclk = pclk;
bp->hclk = hclk;
bp->tx_clk = tx_clk;
+ if (macb_config->jumbo_max_len)
+ bp->jumbo_max_len = macb_config->jumbo_max_len;
+
spin_lock_init(&bp->lock);
/* setup capabilities */
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 24b1d9bcd865..d74655993d4b 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -71,6 +71,7 @@
#define GEM_NCFGR 0x0004 /* Network Config */
#define GEM_USRIO 0x000c /* User IO */
#define GEM_DMACFG 0x0010 /* DMA Configuration */
+#define GEM_JML 0x0048 /* Jumbo Max Length */
#define GEM_HRB 0x0080 /* Hash Bottom */
#define GEM_HRT 0x0084 /* Hash Top */
#define GEM_SA1B 0x0088 /* Specific1 Bottom */
@@ -398,6 +399,7 @@
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
+#define MACB_CAPS_JUMBO 0x00000008
/* Bit manipulation macros */
#define MACB_BIT(name) \
@@ -515,6 +517,9 @@ struct macb_dma_desc {
#define MACB_RX_BROADCAST_OFFSET 31
#define MACB_RX_BROADCAST_SIZE 1
+#define MACB_RX_FRMLEN_MASK 0xFFF
+#define MACB_RX_JFRMLEN_MASK 0x3FFF
+
/* RX checksum offload disabled: bit 24 clear in NCFGR */
#define GEM_RX_TYPEID_MATCH_OFFSET 22
#define GEM_RX_TYPEID_MATCH_SIZE 2
@@ -758,6 +763,7 @@ struct macb_config {
int (*clk_init)(struct platform_device *pdev, struct clk **pclk,
struct clk **hclk, struct clk **tx_clk);
int (*init)(struct platform_device *pdev);
+ int jumbo_max_len;
};
struct macb_queue {
@@ -827,6 +833,9 @@ struct macb {
unsigned int max_tx_length;
u64 ethtool_stats[GEM_STATS_LEN];
+
+ unsigned int rx_frm_len_mask;
+ unsigned int jumbo_max_len;
};
static inline bool macb_is_gem(struct macb *bp)
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
new file mode 100644
index 000000000000..5e7a0e270d54
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -0,0 +1,57 @@
+#
+# Cavium ethernet device configuration
+#
+
+config NET_VENDOR_CAVIUM
+ tristate "Cavium ethernet drivers"
+ depends on PCI
+ default y
+ ---help---
+ Select this option if you want to enable Cavium network support.
+
+ If you have a Cavium SoC or network adapter, say Y.
+
+if NET_VENDOR_CAVIUM
+
+config THUNDER_NIC_PF
+ tristate "Thunder Physical function driver"
+ depends on 64BIT
+ default ARCH_THUNDER
+ select THUNDER_NIC_BGX
+ ---help---
+ This driver supports Thunder's NIC physical function.
+ The NIC provides the controller and DMA engines to
+ move network traffic to and from memory. The NIC
+ works closely with TNS, BGX and SerDes to replace
+ and virtualize the functions of a typical standalone
+ PCIe NIC chip.
+
+config THUNDER_NIC_VF
+ tristate "Thunder Virtual function driver"
+ depends on 64BIT
+ default ARCH_THUNDER
+ ---help---
+ This driver supports Thunder's NIC virtual function.
+
+config THUNDER_NIC_BGX
+ tristate "Thunder MAC interface driver (BGX)"
+ depends on 64BIT
+ default ARCH_THUNDER
+ ---help---
+ This driver supports programming and controlling the MAC
+ interface from the NIC physical function driver.
+
+config LIQUIDIO
+ tristate "Cavium LiquidIO support"
+ depends on 64BIT
+ select PTP_1588_CLOCK
+ select FW_LOADER
+ select LIBCRC32C
+ ---help---
+ This driver supports Cavium LiquidIO Intelligent Server Adapters
+ based on CN66XX and CN68XX chips.
+
+ To compile this driver as a module, choose M here: the module
+ will be called liquidio. This is recommended.
+
+endif # NET_VENDOR_CAVIUM
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
new file mode 100644
index 000000000000..d22f886ac291
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cavium ethernet device drivers.
+#
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile
new file mode 100644
index 000000000000..2f366806835d
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/Makefile
@@ -0,0 +1,16 @@
+#
+# Cavium Liquidio ethernet device driver
+#
+obj-$(CONFIG_LIQUIDIO) += liquidio.o
+
+liquidio-objs := lio_main.o \
+ lio_ethtool.o \
+ request_manager.o \
+ response_manager.o \
+ octeon_device.o \
+ cn66xx_device.o \
+ cn68xx_device.o \
+ octeon_mem_ops.o \
+ octeon_droq.o \
+ octeon_console.o \
+ octeon_nic.o
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
new file mode 100644
index 000000000000..8ad7425f89bf
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -0,0 +1,796 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+int lio_cn6xxx_soft_reset(struct octeon_device *oct)
+{
+ octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
+
+ dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");
+
+ lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
+ octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);
+
+ lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
+ lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);
+
+ /* make sure that the reset is written before starting timer */
+ mmiowb();
+
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+
+ if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
+ dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
+ octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
+
+ return 0;
+}
+
+void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
+{
+ u32 val;
+
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+ if (val & 0x000f0000) {
+ dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
+ val & 0x000f0000);
+ }
+
+ val |= 0xf; /* Enable Link error reporting */
+
+ dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n");
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+}
+
+void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
+ enum octeon_pcie_mps mps)
+{
+ u32 val;
+ u64 r64;
+
+ /* Read config register for MPS */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+
+ if (mps == PCIE_MPS_DEFAULT) {
+ mps = ((val & (0x7 << 5)) >> 5);
+ } else {
+ val &= ~(0x7 << 5); /* Turn off any MPS bits */
+ val |= (mps << 5); /* Set MPS */
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+ }
+
+ /* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
+ r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+ r64 |= (mps << 4);
+ lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+}
+
+void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
+ enum octeon_pcie_mrrs mrrs)
+{
+ u32 val;
+ u64 r64;
+
+ /* Read config register for MRRS */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+
+ if (mrrs == PCIE_MRRS_DEFAULT) {
+ mrrs = ((val & (0x7 << 12)) >> 12);
+ } else {
+ val &= ~(0x7 << 12); /* Turn off any MRRS bits */
+ val |= (mrrs << 12); /* Set MRRS */
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+ }
+
+ /* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
+ r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
+ r64 |= mrrs;
+ octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);
+
+ /* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
+ r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+ r64 |= mrrs;
+ lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+}
+
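Both helpers above follow the same read-modify-write pattern on the PCIe Device Control register: clear the 3-bit field, OR in the new encoding, write it back. A minimal standalone sketch of that pattern (hypothetical helper, assuming the field offsets used above: MPS at bit 5, MRRS at bit 12):

    static void lio_set_devctl_field(struct octeon_device *oct, u32 shift,
                                     u32 encoding)
    {
            u32 val;

            pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
            val &= ~(0x7 << shift);           /* clear the 3-bit field */
            val |= (encoding & 0x7) << shift; /* insert the new encoding */
            pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
    }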
+u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
+{
+ /* Bits 29:24 of MIO_RST_BOOT hold the ref. clock multiplier
+ * for SLI.
+ */
+ return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
+}
+
+u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
+ u32 time_intr_in_us)
+{
+ /* This gives the SLI clock per microsec */
+ u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);
+
+ /* core clock per us / oq ticks would be fractional. To avoid that
+ * we use the method below.
+ */
+
+ /* This gives the clock cycles per millisecond */
+ oqticks_per_us *= 1000;
+
+ /* This gives the oq ticks (1024 core clock cycles) per millisecond */
+ oqticks_per_us /= 1024;
+
+ /* time_intr is in microseconds. The next 2 steps give the oq ticks
+ * corresponding to time_intr.
+ */
+ oqticks_per_us *= time_intr_in_us;
+ oqticks_per_us /= 1000;
+
+ return oqticks_per_us;
+}
+
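A worked example of the conversion above, assuming lio_cn6xxx_coprocessor_clock() returns 600 (a 600 MHz SLI clock) and a 100 us interrupt time:

    /* 600 cycles/us * 1000   = 600000 cycles per ms
     * 600000 / 1024          = 585 oq ticks per ms
     * 585 * 100 us / 1000    = 58 oq ticks
     * Multiplying before dividing keeps every intermediate value
     * integral, which is the point of the ordering in the function.
     */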
+void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
+{
+ /* Select Round-Robin Arb, ES, RO, NS for Input Queues */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
+ CN6XXX_INPUT_CTL_MASK);
+
+ /* Instruction Read Size - Max 4 instructions per PCIE Read */
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
+ 0xFFFFFFFFFFFFFFFFULL);
+
+ /* Select PCIE Port for all Input rings. */
+ octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
+ (oct->pcie_port * 0x5555555555555555ULL));
+}
+
+static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
+{
+ u64 pktctl;
+
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);
+
+ /* 66XX SPECIFIC */
+ if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
+ /* Disable RING_EN if only up to 4 rings are used. */
+ pktctl &= ~(1 << 4);
+ else
+ pktctl |= (1 << 4);
+
+ if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
+ pktctl |= 0xF;
+ else
+ /* Disable per-port backpressure. */
+ pktctl &= ~0xF;
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
+}
+
+void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
+{
+ u32 time_threshold;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ /* Select PCI-E Port for all Output queues */
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
+ (oct->pcie_port * 0x5555555555555555ULL));
+
+ if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
+ } else {
+ /* Set Output queue watermark to 0 to disable backpressure */
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
+ }
+
+ /* Select Info Ptr for length & data */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);
+
+ /* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
+
+ /* Select ES,RO,NS setting from register for Output Queue Packet
+ * Address
+ */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit swap for Output
+ * Queue ScatterList
+ */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);
+
+ /* ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
+#ifdef __BIG_ENDIAN_BITFIELD
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
+ 0x5555555555555555ULL);
+#else
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
+#endif
+
+ /* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
+ 0x5555555555555555ULL);
+
+ /* Set up interrupt packet and time threshold */
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+ (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
+ time_threshold =
+ lio_cn6xxx_get_oq_ticks(oct, (u32)
+ CFG_GET_OQ_INTR_TIME(cn6xxx->conf));
+
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
+}
+
+static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
+{
+ lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
+ lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
+ lio_cn6xxx_enable_error_reporting(oct);
+
+ lio_cn6xxx_setup_global_input_regs(oct);
+ lio_cn66xx_setup_pkt_ctl_regs(oct);
+ lio_cn6xxx_setup_global_output_regs(oct);
+
+ /* Default error timeout value should be 0x200000 to avoid a host hang
+ * when an invalid register is read.
+ */
+ octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
+ return 0;
+}
+
+void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ /* Disable Packet-by-Packet mode; No Parse Mode or Skip length */
+ octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);
+
+ /* Write the start of the input queue's ring and its size */
+ octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this
+ * queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr
+ + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
+ dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter
+ * (used in flush_iq calculation)
+ */
+ iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
+}
+
+static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ lio_cn6xxx_setup_iq_regs(oct, iq_no);
+
+ /* Backpressure for this queue - WMARK set to all F's. This effectively
+ * disables the backpressure mechanism.
+ */
+ octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
+ (0xFFFFFFFFULL << 32));
+}
+
+void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+ u32 intr;
+ struct octeon_droq *droq = oct->droq[oq_no];
+
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ (droq->buffer_size | (OCT_RH_SIZE << 16)));
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg =
+ oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg =
+ oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);
+
+ /* Enable this output queue to generate Packet Timer Interrupt */
+ intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
+ intr |= (1 << oq_no);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);
+
+ /* Enable this output queue to generate Packet Count Interrupt */
+ intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
+ intr |= (1 << oq_no);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
+}
+
+void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
+{
+ u32 mask;
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
+ mask |= oct->io_qmask.iq64B;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
+ mask |= oct->io_qmask.iq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ mask |= oct->io_qmask.oq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+}
+
+void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
+{
+ u32 mask, i, loop = HZ;
+ u32 d32;
+
+ /* Reset the Enable bits for Input Queues. */
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
+ mask ^= oct->io_qmask.iq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
+
+ /* Wait until hardware indicates that the queues are out of reset. */
+ mask = oct->io_qmask.iq;
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
+ while (((d32 & mask) != mask) && loop--) {
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for each Input queue. */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
+ }
+
+ /* Reset the Enable bits for Output Queues. */
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ mask ^= oct->io_qmask.oq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+
+ /* Wait until hardware indicates that the queues are out of reset. */
+ loop = HZ;
+ mask = oct->io_qmask.oq;
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
+ while (((d32 & mask) != mask) && loop--) {
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for each Output queue. */
+ /* for (i = 0; i < oct->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
+ }
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
+ if (d32)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
+ if (d32)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
+}
+
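The two wait loops above share a bounded-poll idiom: re-read a status CSR until the expected bits are all set or a tick budget is exhausted. The same idiom factored into a helper, as a sketch (hypothetical; not part of the driver):

    static bool octeon_poll_csr_bits_set(struct octeon_device *oct, u32 reg,
                                         u32 mask, unsigned long tries)
    {
            while (tries--) {
                    if ((octeon_read_csr(oct, reg) & mask) == mask)
                            return true;
                    schedule_timeout_uninterruptible(1);
            }
            /* one final check after the budget runs out */
            return (octeon_read_csr(oct, reg) & mask) == mask;
    }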
+void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ oct->fn_list.setup_iq_regs(oct, i);
+ }
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ oct->fn_list.setup_oq_regs(oct, i);
+ }
+
+ oct->fn_list.setup_device_regs(oct);
+
+ oct->fn_list.enable_interrupt(oct->chip);
+
+ oct->fn_list.enable_io_queues(oct);
+
+ /* for (i = 0; i < oct->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
+ }
+}
+
+void
+lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
+ u64 core_addr,
+ u32 idx,
+ int valid)
+{
+ u64 bar1;
+
+ if (valid == 0) {
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
+ CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ return;
+ }
+
+ /* Bits 17:4 of the PCI_BAR1_INDEXx store bits 35:22 of
+ * the Core Addr
+ */
+ lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
+ CN6XXX_BAR1_REG(idx, oct->pcie_port));
+
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
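A worked example of the index encoding above, for a hypothetical core address 0x120400000:

    /* core_addr >> 22 = 0x481 (bits 35:22 of the core address)
     * register value  = (0x481 << 4) | PCI_BAR1_MASK
     * The read-back of the register after each write appears to serve
     * only as a flush of the posted write.
     */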
+void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
+ u32 idx,
+ u32 mask)
+{
+ lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
+{
+ return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+u32
+lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
+ struct octeon_instr_queue *iq)
+{
+ u32 new_idx = readl(iq->inst_cnt_reg);
+
+ /* The new instr cnt reg is a 32-bit counter that can roll over.
+ * The counter's value at init time was recorded in
+ * reset_instr_cnt.
+ */
+ if (iq->reset_instr_cnt < new_idx)
+ new_idx -= iq->reset_instr_cnt;
+ else
+ new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
+
+ /* Reducing modulo the IQ size gives the new ring
+ * index.
+ */
+ new_idx %= iq->max_count;
+
+ return new_idx;
+}
+
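A worked example of the roll-over handling above: if reset_instr_cnt was 0xFFFFFFF0 at init and the register now reads 0x00000010, the else branch computes

    /* 0x10 + (0xFFFFFFFF - 0xFFFFFFF0) + 1 = 0x20, i.e. 32
     * instructions issued since init, which 'new_idx %= iq->max_count'
     * then folds into a ring index.
     */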
+void lio_cn6xxx_enable_interrupt(void *chip)
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+ u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;
+
+ /* Enable Interrupt */
+ writeq(mask, cn6xxx->intr_enb_reg64);
+}
+
+void lio_cn6xxx_disable_interrupt(void *chip)
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+
+ /* Disable Interrupts */
+ writeq(0, cn6xxx->intr_enb_reg64);
+
+ /* make sure interrupts are really disabled */
+ mmiowb();
+}
+
+static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
+{
+ /* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER register
+ * to determine the PCIE port #
+ */
+ oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;
+
+ dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
+}
+
+void
+lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
+{
+ dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
+ CVM_CAST64(intr64));
+}
+
+int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
+{
+ struct octeon_droq *droq;
+ u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb;
+ u32 droq_cnt_enb, droq_cnt_mask;
+
+ droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
+ droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
+ droq_mask = droq_cnt_mask & droq_cnt_enb;
+
+ droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
+ droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
+ droq_mask |= (droq_time_mask & droq_int_enb);
+
+ droq_mask &= oct->io_qmask.oq;
+
+ oct->droq_intr = 0;
+
+ /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
+ if (!(droq_mask & (1 << oq_no)))
+ continue;
+
+ droq = oct->droq[oq_no];
+ pkt_count = octeon_droq_check_hw_for_pkts(oct, droq);
+ if (pkt_count) {
+ oct->droq_intr |= (1ULL << oq_no);
+ if (droq->ops.poll_mode) {
+ u32 value;
+ u32 reg;
+
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ /* disable interrupts for this droq */
+ spin_lock
+ (&cn6xxx->lock_for_droq_int_enb_reg);
+ reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
+ value = octeon_read_csr(oct, reg);
+ value &= ~(1 << oq_no);
+ octeon_write_csr(oct, reg, value);
+ reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
+ value = octeon_read_csr(oct, reg);
+ value &= ~(1 << oq_no);
+ octeon_write_csr(oct, reg, value);
+
+ /* Ensure that the enable register is written.
+ */
+ mmiowb();
+
+ spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
+ }
+ }
+ }
+
+ droq_time_mask &= oct->io_qmask.oq;
+ droq_cnt_mask &= oct->io_qmask.oq;
+
+ /* Reset the PKT_CNT/TIME_INT registers. */
+ if (droq_time_mask)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);
+
+ if (droq_cnt_mask) /* reset PKT_CNT register:66xx */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);
+
+ return 0;
+}
+
+irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ u64 intr64;
+
+ intr64 = readq(cn6xxx->intr_sum_reg64);
+
+ /* Proceed only if our device actually interrupted. Also treat a
+ * value of all F's as spurious, since that is what a PCI read
+ * returns when it fails (e.g. the interrupt fired on an error).
+ */
+ if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
+ return IRQ_NONE;
+
+ oct->int_status = 0;
+
+ if (intr64 & CN6XXX_INTR_ERR)
+ lio_cn6xxx_process_pcie_error_intr(oct, intr64);
+
+ if (intr64 & CN6XXX_INTR_PKT_DATA) {
+ lio_cn6xxx_process_droq_intr_regs(oct);
+ oct->int_status |= OCT_DEV_INTR_PKT_DATA;
+ }
+
+ if (intr64 & CN6XXX_INTR_DMA0_FORCE)
+ oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
+
+ if (intr64 & CN6XXX_INTR_DMA1_FORCE)
+ oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
+
+ /* Clear the current interrupts */
+ writeq(intr64, cn6xxx->intr_sum_reg64);
+
+ return IRQ_HANDLED;
+}
+
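The handler follows the usual shared-interrupt discipline: return IRQ_NONE when the summary register shows no cause, treat all F's as a failed bus read, and acknowledge only the causes observed in this pass. A compressed sketch of that skeleton, assuming the summary register is write-1-to-clear:

    u64 sum = readq(sum_reg);                  /* sum_reg: hypothetical */

    if (!sum || sum == 0xFFFFFFFFFFFFFFFFULL)  /* not ours, or dead bus */
            return IRQ_NONE;
    /* ...dispatch per-cause handling here... */
    writeq(sum, sum_reg);                      /* W1C: ack only what we saw */
    return IRQ_HANDLED;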
+void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
+ void *chip,
+ struct octeon_reg_list *reg_list)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+
+ reg_list->pci_win_wr_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
+ reg_list->pci_win_wr_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
+ reg_list->pci_win_wr_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);
+
+ reg_list->pci_win_rd_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
+ reg_list->pci_win_rd_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
+ reg_list->pci_win_rd_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);
+
+ reg_list->pci_win_wr_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
+ reg_list->pci_win_wr_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
+ reg_list->pci_win_wr_data =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);
+
+ reg_list->pci_win_rd_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
+ reg_list->pci_win_rd_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
+ reg_list->pci_win_rd_data =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);
+
+ lio_cn6xxx_get_pcie_qlmport(oct);
+
+ cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
+ cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
+ cn6xxx->intr_enb_reg64 =
+ bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
+}
+
+int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);
+
+ oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;
+
+ oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
+ oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
+ oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
+ oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;
+
+ oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
+ oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
+ oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;
+
+ oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
+ oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;
+
+ lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);
+
+ cn6xxx->conf = (struct octeon_config *)
+ oct_get_config_info(oct, LIO_210SV);
+ if (!cn6xxx->conf) {
+ dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);
+
+ return 0;
+}
+
+int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
+ struct octeon_config *conf6xxx)
+{
+ /* int total_instrs = 0; */
+
+ if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_IQ_MAX_Q(conf6xxx),
+ CN6XXX_MAX_INPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_OQ_MAX_Q(conf6xxx),
+ CN6XXX_MAX_OUTPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
+ CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
+ __func__);
+ return 1;
+ }
+ if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) ||
+ !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
+ dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
new file mode 100644
index 000000000000..f77918779355
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -0,0 +1,107 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn66xx_device.h
+ * \brief Host Driver: Routines that perform CN66XX specific operations.
+ */
+
+#ifndef __CN66XX_DEVICE_H__
+#define __CN66XX_DEVICE_H__
+
+/* Register addresses and configuration for CN6XXX devices.
+ * If device specific changes need to be made then add a struct to include
+ * device specific fields as shown in the commented section
+ */
+struct octeon_cn6xxx {
+ /** PCI interrupt summary register */
+ u8 __iomem *intr_sum_reg64;
+
+ /** PCI interrupt enable register */
+ u8 __iomem *intr_enb_reg64;
+
+ /** The PCI interrupt mask used by interrupt handler */
+ u64 intr_mask64;
+
+ struct octeon_config *conf;
+
+ /* Example additional fields - not used currently
+ * struct {
+ * }cn6xyz;
+ */
+
+ /* For the purpose of atomic access to interrupt enable reg */
+ spinlock_t lock_for_droq_int_enb_reg;
+
+};
+
+enum octeon_pcie_mps {
+ PCIE_MPS_DEFAULT = -1, /* Use the default setup by BIOS */
+ PCIE_MPS_128B = 0,
+ PCIE_MPS_256B = 1
+};
+
+enum octeon_pcie_mrrs {
+ PCIE_MRRS_DEFAULT = -1, /* Use the default setup by BIOS */
+ PCIE_MRRS_128B = 0,
+ PCIE_MRRS_256B = 1,
+ PCIE_MRRS_512B = 2,
+ PCIE_MRRS_1024B = 3,
+ PCIE_MRRS_2048B = 4,
+ PCIE_MRRS_4096B = 5
+};
+
+/* Common functions for 66xx and 68xx */
+int lio_cn6xxx_soft_reset(struct octeon_device *oct);
+void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct);
+void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
+ enum octeon_pcie_mps mps);
+void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
+ enum octeon_pcie_mrrs mrrs);
+void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct);
+void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct);
+void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
+void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
+void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
+void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
+void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64);
+int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct);
+irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
+void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
+void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
+ u32 idx, int valid);
+void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
+u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
+u32
+lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
+ struct octeon_instr_queue *iq);
+void lio_cn6xxx_enable_interrupt(void *chip);
+void lio_cn6xxx_disable_interrupt(void *chip);
+void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
+ struct octeon_reg_list *reg_list);
+u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
+u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
+int lio_setup_cn66xx_octeon_device(struct octeon_device *);
+int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
+ struct octeon_config *);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
new file mode 100644
index 000000000000..5e3aff242ad3
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
@@ -0,0 +1,535 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn66xx_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN66XX devices.
+ */
+
+#ifndef __CN66XX_REGS_H__
+#define __CN66XX_REGS_H__
+
+#define CN6XXX_XPANSION_BAR 0x30
+
+#define CN6XXX_MSI_CAP 0x50
+#define CN6XXX_MSI_ADDR_LO 0x54
+#define CN6XXX_MSI_ADDR_HI 0x58
+#define CN6XXX_MSI_DATA 0x5C
+
+#define CN6XXX_PCIE_CAP 0x70
+#define CN6XXX_PCIE_DEVCAP 0x74
+#define CN6XXX_PCIE_DEVCTL 0x78
+#define CN6XXX_PCIE_LINKCAP 0x7C
+#define CN6XXX_PCIE_LINKCTL 0x80
+#define CN6XXX_PCIE_SLOTCAP 0x84
+#define CN6XXX_PCIE_SLOTCTL 0x88
+
+#define CN6XXX_PCIE_ENH_CAP 0x100
+#define CN6XXX_PCIE_UNCORR_ERR_STATUS 0x104
+#define CN6XXX_PCIE_UNCORR_ERR_MASK 0x108
+#define CN6XXX_PCIE_UNCORR_ERR 0x10C
+#define CN6XXX_PCIE_CORR_ERR_STATUS 0x110
+#define CN6XXX_PCIE_CORR_ERR_MASK 0x114
+#define CN6XXX_PCIE_ADV_ERR_CAP 0x118
+
+#define CN6XXX_PCIE_ACK_REPLAY_TIMER 0x700
+#define CN6XXX_PCIE_OTHER_MSG 0x704
+#define CN6XXX_PCIE_PORT_FORCE_LINK 0x708
+#define CN6XXX_PCIE_ACK_FREQ 0x70C
+#define CN6XXX_PCIE_PORT_LINK_CTL 0x710
+#define CN6XXX_PCIE_LANE_SKEW 0x714
+#define CN6XXX_PCIE_SYM_NUM 0x718
+#define CN6XXX_PCIE_FLTMSK 0x720
+
+/* ############## BAR0 Registers ################ */
+
+#define CN6XXX_SLI_CTL_PORT0 0x0050
+#define CN6XXX_SLI_CTL_PORT1 0x0060
+
+#define CN6XXX_SLI_WINDOW_CTL 0x02E0
+#define CN6XXX_SLI_DBG_DATA 0x0310
+#define CN6XXX_SLI_SCRATCH1 0x03C0
+#define CN6XXX_SLI_SCRATCH2 0x03D0
+#define CN6XXX_SLI_CTL_STATUS 0x0570
+
+#define CN6XXX_WIN_WR_ADDR_LO 0x0000
+#define CN6XXX_WIN_WR_ADDR_HI 0x0004
+#define CN6XXX_WIN_WR_ADDR64 CN6XXX_WIN_WR_ADDR_LO
+
+#define CN6XXX_WIN_RD_ADDR_LO 0x0010
+#define CN6XXX_WIN_RD_ADDR_HI 0x0014
+#define CN6XXX_WIN_RD_ADDR64 CN6XXX_WIN_RD_ADDR_LO
+
+#define CN6XXX_WIN_WR_DATA_LO 0x0020
+#define CN6XXX_WIN_WR_DATA_HI 0x0024
+#define CN6XXX_WIN_WR_DATA64 CN6XXX_WIN_WR_DATA_LO
+
+#define CN6XXX_WIN_RD_DATA_LO 0x0040
+#define CN6XXX_WIN_RD_DATA_HI 0x0044
+#define CN6XXX_WIN_RD_DATA64 CN6XXX_WIN_RD_DATA_LO
+
+#define CN6XXX_WIN_WR_MASK_LO 0x0030
+#define CN6XXX_WIN_WR_MASK_HI 0x0034
+#define CN6XXX_WIN_WR_MASK_REG CN6XXX_WIN_WR_MASK_LO
+
+/* 1 register (32-bit) to enable Input queues */
+#define CN6XXX_SLI_PKT_INSTR_ENB 0x1000
+
+/* 1 register (32-bit) to enable Output queues */
+#define CN6XXX_SLI_PKT_OUT_ENB 0x1010
+
+/* 1 register (32-bit) to determine whether Output queues are in reset. */
+#define CN6XXX_SLI_PORT_IN_RST_OQ 0x11F0
+
+/* 1 register (32-bit) to determine whether Input queues are in reset. */
+#define CN6XXX_SLI_PORT_IN_RST_IQ 0x11F4
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 1 register (32-bit) - instr. size of each input queue. */
+#define CN6XXX_SLI_PKT_INSTR_SIZE 0x1020
+
+/* 32 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define CN6XXX_SLI_IQ_INSTR_COUNT_START 0x2000
+
+/* 32 registers for Input Queue Start Addr - SLI_PKT0_INSTR_BADDR */
+#define CN6XXX_SLI_IQ_BASE_ADDR_START64 0x2800
+
+/* 32 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define CN6XXX_SLI_IQ_DOORBELL_START 0x2C00
+
+/* 32 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define CN6XXX_SLI_IQ_SIZE_START 0x3000
+
+/* 32 registers for Instruction Header Options - SLI_PKT0_INSTR_HEADER */
+#define CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 0x3400
+
+/* 1 register (64-bit) - Back Pressure for each input queue - SLI_PKT0_IN_BP */
+#define CN66XX_SLI_INPUT_BP_START64 0x3800
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_IQ_OFFSET 0x10
+
+/* 1 register (32-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT_INPUT_CONTROL.
+ */
+#define CN6XXX_SLI_PKT_INPUT_CONTROL 0x1170
+
+/* 1 register (64-bit) - Number of instructions to read at one time
+ * - 2 bits for each input ring. SLI_PKT_INSTR_RD_SIZE.
+ */
+#define CN6XXX_SLI_PKT_INSTR_RD_SIZE 0x11A0
+
+/* 1 register (64-bit) - Assign Input ring to MAC port
+ * - 2 bits for each input ring. SLI_PKT_IN_PCIE_PORT.
+ */
+#define CN6XXX_SLI_IN_PCIE_PORT 0x11B0
+
+/*------- Request Queue Macros ---------*/
+#define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \
+ (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_SIZE(iq) \
+ (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \
+ (CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_DOORBELL(iq) \
+ (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \
+ (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN66XX_SLI_IQ_BP64(iq) \
+ (CN66XX_SLI_INPUT_BP_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
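Each per-queue CSR sits at a fixed 16-byte stride from its queue-0 base, so the macros above are plain base-plus-offset arithmetic; for example:

    /* CN6XXX_SLI_IQ_DOORBELL(3) = 0x2C00 + 3 * 0x10 = 0x2C30
     * CN6XXX_SLI_IQ_SIZE(3)     = 0x3000 + 3 * 0x10 = 0x3030
     */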
+/*------------------ Masks ----------------*/
+#define CN6XXX_INPUT_CTL_ROUND_ROBIN_ARB BIT(22)
+#define CN6XXX_INPUT_CTL_DATA_NS BIT(8)
+#define CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP BIT(6)
+#define CN6XXX_INPUT_CTL_DATA_RO BIT(5)
+#define CN6XXX_INPUT_CTL_USE_CSR BIT(4)
+#define CN6XXX_INPUT_CTL_GATHER_NS BIT(3)
+#define CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP BIT(2)
+#define CN6XXX_INPUT_CTL_GATHER_RO BIT(1)
+
+#ifdef __BIG_ENDIAN_BITFIELD
+#define CN6XXX_INPUT_CTL_MASK \
+ (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN6XXX_INPUT_CTL_USE_CSR \
+ | CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP)
+#else
+#define CN6XXX_INPUT_CTL_MASK \
+ (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN6XXX_INPUT_CTL_USE_CSR)
+#endif
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 32 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define CN6XXX_SLI_OQ0_BUFF_INFO_SIZE 0x0C00
+
+/* 32 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define CN6XXX_SLI_OQ_BASE_ADDR_START64 0x1400
+
+/* 32 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define CN6XXX_SLI_OQ_PKT_CREDITS_START 0x1800
+
+/* 32 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define CN6XXX_SLI_OQ_SIZE_START 0x1C00
+
+/* 32 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define CN6XXX_SLI_OQ_PKT_SENT_START 0x2400
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_OQ_OFFSET 0x10
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Relaxed Ordering setting for reading Output Queues descriptors
+ * - SLI_PKT_SLIST_ROR
+ */
+#define CN6XXX_SLI_PKT_SLIST_ROR 0x1030
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - No Snoop mode for reading Output Queues descriptors
+ * - SLI_PKT_SLIST_NS
+ */
+#define CN6XXX_SLI_PKT_SLIST_NS 0x1040
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Endian-Swap mode for reading Output Queue descriptors
+ * - SLI_PKT_SLIST_ES
+ */
+#define CN6XXX_SLI_PKT_SLIST_ES64 0x1050
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - InfoPtr mode for Output Queues.
+ * - SLI_PKT_IPTR
+ */
+#define CN6XXX_SLI_PKT_IPTR 0x1070
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - DPTR format selector for Output queues.
+ * - SLI_PKT_DPADDR
+ */
+#define CN6XXX_SLI_PKT_DPADDR 0x1080
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Relaxed Ordering setting for reading Output Queues data
+ * - SLI_PKT_DATA_OUT_ROR
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_ROR 0x1090
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - No Snoop mode for reading Output Queues data
+ * - SLI_PKT_DATA_OUT_NS
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_NS 0x10A0
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Endian-Swap mode for reading Output Queue data
+ * - SLI_PKT_DATA_OUT_ES
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_ES64 0x10B0
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Controls whether SLI_PKTn_CNTS is incremented for bytes or for packets.
+ * - SLI_PKT_OUT_BMODE
+ */
+#define CN6XXX_SLI_PKT_OUT_BMODE 0x10D0
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Assign PCIE port for Output queues
+ * - SLI_PKT_PCIE_PORT.
+ */
+#define CN6XXX_SLI_PKT_PCIE_PORT64 0x10E0
+
+/* 1 (64-bit) register for Output Queue Packet Count Interrupt Threshold
+ * & Time Threshold. The same setting applies to all 32 queues.
+ * The register is defined as a 64-bit register, but we use the
+ * 32-bit offsets to define distinct addresses.
+ */
+#define CN6XXX_SLI_OQ_INT_LEVEL_PKTS 0x1120
+#define CN6XXX_SLI_OQ_INT_LEVEL_TIME 0x1124
+
+/* 1 register (64-bit) for Output Queue backpressure across all rings. */
+#define CN6XXX_SLI_OQ_WMARK 0x1180
+
+/* 1 register to control output queue global backpressure & ring enable. */
+#define CN6XXX_SLI_PKT_CTL 0x1220
+
+/*------- Output Queue Macros ---------*/
+#define CN6XXX_SLI_OQ_BASE_ADDR64(oq) \
+ (CN6XXX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_SIZE(oq) \
+ (CN6XXX_SLI_OQ_SIZE_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN6XXX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_PKTS_SENT(oq) \
+ (CN6XXX_SLI_OQ_PKT_SENT_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN6XXX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+/*######################### DMA Counters #########################*/
+
+/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. */
+#define CN6XXX_DMA_CNT_START 0x0400
+
+/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values
+ * SLI_DMA_0_TIM
+ */
+#define CN6XXX_DMA_TIM_START 0x0420
+
+/* 2 registers (64-bit) - DMA count & Time Interrupt threshold -
+ * SLI_DMA_0_INT_LEVEL
+ */
+#define CN6XXX_DMA_INT_LEVEL_START 0x03E0
+
+/* Each DMA register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_DMA_OFFSET 0x10
+
+/*---------- DMA Counter Macros ---------*/
+#define CN6XXX_DMA_CNT(dq) \
+ (CN6XXX_DMA_CNT_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_PKT_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_TIME_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + 4 + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_TIM(dq) \
+ (CN6XXX_DMA_TIM_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+/*######################## INTERRUPTS #########################*/
+
+/* 1 register (64-bit) for Interrupt Summary */
+#define CN6XXX_SLI_INT_SUM64 0x0330
+
+/* 1 register (64-bit) for Interrupt Enable */
+#define CN6XXX_SLI_INT_ENB64_PORT0 0x0340
+#define CN6XXX_SLI_INT_ENB64_PORT1 0x0350
+
+/* 1 register (32-bit) to enable Output Queue Packet/Byte Count Interrupt */
+#define CN6XXX_SLI_PKT_CNT_INT_ENB 0x1150
+
+/* 1 register (32-bit) to enable Output Queue Packet Timer Interrupt */
+#define CN6XXX_SLI_PKT_TIME_INT_ENB 0x1160
+
+/* 1 register (32-bit) to indicate which Output Queue reached pkt threshold */
+#define CN6XXX_SLI_PKT_CNT_INT 0x1130
+
+/* 1 register (32-bit) to indicate which Output Queue reached time threshold */
+#define CN6XXX_SLI_PKT_TIME_INT 0x1140
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define CN6XXX_INTR_RML_TIMEOUT_ERR BIT(1)
+#define CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR BIT(2)
+#define CN6XXX_INTR_IO2BIG_ERR BIT(3)
+#define CN6XXX_INTR_PKT_COUNT BIT(4)
+#define CN6XXX_INTR_PKT_TIME BIT(5)
+#define CN6XXX_INTR_M0UPB0_ERR BIT(8)
+#define CN6XXX_INTR_M0UPWI_ERR BIT(9)
+#define CN6XXX_INTR_M0UNB0_ERR BIT(10)
+#define CN6XXX_INTR_M0UNWI_ERR BIT(11)
+#define CN6XXX_INTR_M1UPB0_ERR BIT(12)
+#define CN6XXX_INTR_M1UPWI_ERR BIT(13)
+#define CN6XXX_INTR_M1UNB0_ERR BIT(14)
+#define CN6XXX_INTR_M1UNWI_ERR BIT(15)
+#define CN6XXX_INTR_MIO_INT0 BIT(16)
+#define CN6XXX_INTR_MIO_INT1 BIT(17)
+#define CN6XXX_INTR_MAC_INT0 BIT(18)
+#define CN6XXX_INTR_MAC_INT1 BIT(19)
+
+#define CN6XXX_INTR_DMA0_FORCE BIT_ULL(32)
+#define CN6XXX_INTR_DMA1_FORCE BIT_ULL(33)
+#define CN6XXX_INTR_DMA0_COUNT BIT_ULL(34)
+#define CN6XXX_INTR_DMA1_COUNT BIT_ULL(35)
+#define CN6XXX_INTR_DMA0_TIME BIT_ULL(36)
+#define CN6XXX_INTR_DMA1_TIME BIT_ULL(37)
+#define CN6XXX_INTR_INSTR_DB_OF_ERR BIT_ULL(48)
+#define CN6XXX_INTR_SLIST_DB_OF_ERR BIT_ULL(49)
+#define CN6XXX_INTR_POUT_ERR BIT_ULL(50)
+#define CN6XXX_INTR_PIN_BP_ERR BIT_ULL(51)
+#define CN6XXX_INTR_PGL_ERR BIT_ULL(52)
+#define CN6XXX_INTR_PDI_ERR BIT_ULL(53)
+#define CN6XXX_INTR_POP_ERR BIT_ULL(54)
+#define CN6XXX_INTR_PINS_ERR BIT_ULL(55)
+#define CN6XXX_INTR_SPRT0_ERR BIT_ULL(56)
+#define CN6XXX_INTR_SPRT1_ERR BIT_ULL(57)
+#define CN6XXX_INTR_ILL_PAD_ERR BIT_ULL(60)
+
+#define CN6XXX_INTR_DMA0_DATA (CN6XXX_INTR_DMA0_TIME)
+
+#define CN6XXX_INTR_DMA1_DATA (CN6XXX_INTR_DMA1_TIME)
+
+#define CN6XXX_INTR_DMA_DATA \
+ (CN6XXX_INTR_DMA0_DATA | CN6XXX_INTR_DMA1_DATA)
+
+#define CN6XXX_INTR_PKT_DATA (CN6XXX_INTR_PKT_TIME | \
+ CN6XXX_INTR_PKT_COUNT)
+
+/* Sum of interrupts for all PCI-Express Data Interrupts */
+#define CN6XXX_INTR_PCIE_DATA \
+ (CN6XXX_INTR_DMA_DATA | CN6XXX_INTR_PKT_DATA)
+
+#define CN6XXX_INTR_MIO \
+ (CN6XXX_INTR_MIO_INT0 | CN6XXX_INTR_MIO_INT1)
+
+#define CN6XXX_INTR_MAC \
+ (CN6XXX_INTR_MAC_INT0 | CN6XXX_INTR_MAC_INT1)
+
+/* Sum of interrupts for error events */
+#define CN6XXX_INTR_ERR \
+ (CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR \
+ | CN6XXX_INTR_IO2BIG_ERR \
+ | CN6XXX_INTR_M0UPB0_ERR \
+ | CN6XXX_INTR_M0UPWI_ERR \
+ | CN6XXX_INTR_M0UNB0_ERR \
+ | CN6XXX_INTR_M0UNWI_ERR \
+ | CN6XXX_INTR_M1UPB0_ERR \
+ | CN6XXX_INTR_M1UPWI_ERR \
+ | CN6XXX_INTR_M1UNB0_ERR \
+ | CN6XXX_INTR_M1UNWI_ERR \
+ | CN6XXX_INTR_INSTR_DB_OF_ERR \
+ | CN6XXX_INTR_SLIST_DB_OF_ERR \
+ | CN6XXX_INTR_POUT_ERR \
+ | CN6XXX_INTR_PIN_BP_ERR \
+ | CN6XXX_INTR_PGL_ERR \
+ | CN6XXX_INTR_PDI_ERR \
+ | CN6XXX_INTR_POP_ERR \
+ | CN6XXX_INTR_PINS_ERR \
+ | CN6XXX_INTR_SPRT0_ERR \
+ | CN6XXX_INTR_SPRT1_ERR \
+ | CN6XXX_INTR_ILL_PAD_ERR)
+
+/* Programmed Mask for Interrupt Sum */
+#define CN6XXX_INTR_MASK \
+ (CN6XXX_INTR_PCIE_DATA \
+ | CN6XXX_INTR_DMA0_FORCE \
+ | CN6XXX_INTR_DMA1_FORCE \
+ | CN6XXX_INTR_MIO \
+ | CN6XXX_INTR_MAC \
+ | CN6XXX_INTR_ERR)
+
+#define CN6XXX_SLI_S2M_PORT0_CTL 0x3D80
+#define CN6XXX_SLI_S2M_PORT1_CTL 0x3D90
+#define CN6XXX_SLI_S2M_PORTX_CTL(port) \
+ (CN6XXX_SLI_S2M_PORT0_CTL + ((port) * 0x10))
+
+#define CN6XXX_SLI_INT_ENB64(port) \
+ (CN6XXX_SLI_INT_ENB64_PORT0 + ((port) * 0x10))
+
+#define CN6XXX_SLI_MAC_NUMBER 0x3E00
+
+/* CN6XXX BAR1 Index registers. */
+#define CN6XXX_PEM_BAR1_INDEX000 0x00011800C00000A8ULL
+#define CN6XXX_PEM_OFFSET 0x0000000001000000ULL
+
+#define CN6XXX_BAR1_INDEX_START CN6XXX_PEM_BAR1_INDEX000
+#define CN6XXX_PCI_BAR1_OFFSET 0x8
+
+#define CN6XXX_BAR1_REG(idx, port) \
+ (CN6XXX_BAR1_INDEX_START + ((port) * CN6XXX_PEM_OFFSET) + \
+ (CN6XXX_PCI_BAR1_OFFSET * (idx)))
+
+/*############################ DPI #########################*/
+
+#define CN6XXX_DPI_CTL 0x0001df0000000040ULL
+
+#define CN6XXX_DPI_DMA_CONTROL 0x0001df0000000048ULL
+
+#define CN6XXX_DPI_REQ_GBL_ENB 0x0001df0000000050ULL
+
+#define CN6XXX_DPI_REQ_ERR_RSP 0x0001df0000000058ULL
+
+#define CN6XXX_DPI_REQ_ERR_RST 0x0001df0000000060ULL
+
+#define CN6XXX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL
+
+#define CN6XXX_DPI_DMA_ENG_ENB(q_no) \
+ (CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8))
+
+#define CN6XXX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL
+
+#define CN6XXX_DPI_DMA_ENG_BUF(q_no) \
+ (CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8))
+
+#define CN6XXX_DPI_SLI_PRT0_CFG 0x0001df0000000900ULL
+#define CN6XXX_DPI_SLI_PRT1_CFG 0x0001df0000000908ULL
+#define CN6XXX_DPI_SLI_PRTX_CFG(port) \
+ (CN6XXX_DPI_SLI_PRT0_CFG + ((port) * 0x10))
+
+#define CN6XXX_DPI_DMA_COMMIT_MODE BIT_ULL(58)
+#define CN6XXX_DPI_DMA_PKT_HP BIT_ULL(57)
+#define CN6XXX_DPI_DMA_PKT_EN BIT_ULL(56)
+#define CN6XXX_DPI_DMA_O_ES BIT_ULL(15)
+#define CN6XXX_DPI_DMA_O_MODE BIT_ULL(14)
+
+#define CN6XXX_DPI_DMA_CTL_MASK \
+ (CN6XXX_DPI_DMA_COMMIT_MODE | \
+ CN6XXX_DPI_DMA_PKT_HP | \
+ CN6XXX_DPI_DMA_PKT_EN | \
+ CN6XXX_DPI_DMA_O_ES | \
+ CN6XXX_DPI_DMA_O_MODE)
+
+/*############################ CIU #########################*/
+
+#define CN6XXX_CIU_SOFT_BIST 0x0001070000000738ULL
+#define CN6XXX_CIU_SOFT_RST 0x0001070000000740ULL
+
+/*############################ MIO #########################*/
+#define CN6XXX_MIO_PTP_CLOCK_CFG 0x0001070000000f00ULL
+#define CN6XXX_MIO_PTP_CLOCK_LO 0x0001070000000f08ULL
+#define CN6XXX_MIO_PTP_CLOCK_HI 0x0001070000000f10ULL
+#define CN6XXX_MIO_PTP_CLOCK_COMP 0x0001070000000f18ULL
+#define CN6XXX_MIO_PTP_TIMESTAMP 0x0001070000000f20ULL
+#define CN6XXX_MIO_PTP_EVT_CNT 0x0001070000000f28ULL
+#define CN6XXX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL
+#define CN6XXX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL
+#define CN6XXX_MIO_PTP_CKOUT_HI_INCR 0x0001070000000f40ULL
+#define CN6XXX_MIO_PTP_CKOUT_LO_INCR 0x0001070000000f48ULL
+#define CN6XXX_MIO_PTP_PPS_THRESH_LO 0x0001070000000f50ULL
+#define CN6XXX_MIO_PTP_PPS_THRESH_HI 0x0001070000000f58ULL
+#define CN6XXX_MIO_PTP_PPS_HI_INCR 0x0001070000000f60ULL
+#define CN6XXX_MIO_PTP_PPS_LO_INCR 0x0001070000000f68ULL
+
+#define CN6XXX_MIO_QLM4_CFG 0x00011800000015B0ULL
+#define CN6XXX_MIO_RST_BOOT 0x0001180000001600ULL
+
+#define CN6XXX_MIO_QLM_CFG_MASK 0x7
+
+/*############################ LMC #########################*/
+
+#define CN6XXX_LMC0_RESET_CTL 0x0001180088000180ULL
+#define CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
new file mode 100644
index 000000000000..8e830d0c0754
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -0,0 +1,198 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
+{
+ u32 i;
+ u32 fifo_sizes[6] = { 3, 3, 1, 1, 1, 8 };
+
+ lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL);
+ dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n",
+ lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL));
+
+ for (i = 0; i < 6; i++) {
+ /* Prevent service of the instruction queue for all DMA engines.
+ * Engine 5 will remain 0; engines 0-4 will be set up by the
+ * core.
+ */
+ lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i));
+ lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i));
+ dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i,
+ lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i)));
+ }
+
+ /* DPI_SLI_PRT_CFG has MPS and MRRS settings that will be set
+ * separately.
+ */
+
+ lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL);
+ dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n",
+ lio_pci_readq(oct, CN6XXX_DPI_CTL));
+}
+
+static int lio_cn68xx_soft_reset(struct octeon_device *oct)
+{
+ lio_cn6xxx_soft_reset(oct);
+ lio_cn68xx_set_dpi_regs(oct);
+
+ return 0;
+}
+
+static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
+ u64 pktctl, tx_pipe, max_oqs;
+
+ pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);
+
+ /* 68XX specific */
+ max_oqs = CFG_GET_OQ_MAX_Q(CHIP_FIELD(oct, cn6xxx, conf));
+ tx_pipe = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE);
+ tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */
+ tx_pipe |= max_oqs << 16; /* put max_oqs in NUMP field */
+ octeon_write_csr64(oct, CN68XX_SLI_TX_PIPE, tx_pipe);
+
+ if (CFG_GET_IS_SLI_BP_ON(cn68xx->conf))
+ pktctl |= 0xF;
+ else
+ /* Disable per-port backpressure. */
+ pktctl &= ~0xF;
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
+}
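+
+/* Sketch of the TX_PIPE update above: NUMP occupies bits 23:16, so for an
+ * illustrative max_oqs of 32 the read-modify-write amounts to
+ *
+ *   tx_pipe = (tx_pipe & ~0xff0000ULL) | (32ULL << 16);
+ */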
+
+static int lio_cn68xx_setup_device_regs(struct octeon_device *oct)
+{
+ lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
+ lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_256B);
+ lio_cn6xxx_enable_error_reporting(oct);
+
+ lio_cn6xxx_setup_global_input_regs(oct);
+ lio_cn68xx_setup_pkt_ctl_regs(oct);
+ lio_cn6xxx_setup_global_output_regs(oct);
+
+ /* The default error timeout value should be 0x200000 to avoid a
+ * host hang when an invalid register is read.
+ */
+ octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
+
+ return 0;
+}
+
+static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
+{
+ u32 val = 0;
+
+ /* Set M_VEND1_DRP and M_VEND0_DRP bits */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, &val);
+ val |= 0x3;
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
+}
+
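+/* Note (inferred from the code below): a QLM4 mode field of 0 identifies
+ * the 2-port LIO_210NV variant; any other value is treated as the 4-port
+ * LIO_410NV card in lio_setup_cn68xx_octeon_device().
+ */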
+int lio_is_210nv(struct octeon_device *oct)
+{
+ u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);
+
+ return ((mio_qlm4_cfg & CN6XXX_MIO_QLM_CFG_MASK) == 0);
+}
+
+int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
+ u16 card_type = LIO_410NV;
+
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN68XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ spin_lock_init(&cn68xx->lock_for_droq_int_enb_reg);
+
+ oct->fn_list.setup_iq_regs = lio_cn6xxx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;
+
+ oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
+ oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
+ oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
+ oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
+ oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;
+
+ oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
+ oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;
+
+ oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
+ oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;
+
+ lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);
+
+ /* Determine variant of card */
+ if (lio_is_210nv(oct))
+ card_type = LIO_210NV;
+
+ cn68xx->conf = (struct octeon_config *)
+ oct_get_config_info(oct, card_type);
+ if (!cn68xx->conf) {
+ dev_err(&oct->pci_dev->dev, "%s No Config found for CN68XX %s\n",
+ __func__,
+ (card_type == LIO_410NV) ? LIO_410NV_NAME :
+ LIO_210NV_NAME);
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);
+
+ lio_cn68xx_vendor_message_fix(oct);
+
+ return 0;
+}
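+
+/* Summary (descriptive): the setup routine maps BAR0/BAR1, fills the
+ * fn_list dispatch table with the shared CN6XXX helpers plus the two
+ * CN68XX-specific overrides (soft_reset and setup_device_regs), and then
+ * binds the configuration that matches the detected card variant.
+ */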
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
new file mode 100644
index 000000000000..d4e1c9fb0bf2
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
@@ -0,0 +1,33 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn68xx_device.h
+ * \brief Host Driver: Routines that perform CN68XX specific operations.
+ */
+
+#ifndef __CN68XX_DEVICE_H__
+#define __CN68XX_DEVICE_H__
+
+int lio_setup_cn68xx_octeon_device(struct octeon_device *oct);
+int lio_is_210nv(struct octeon_device *oct);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
new file mode 100644
index 000000000000..38cddbd107b6
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
@@ -0,0 +1,51 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file cn68xx_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN68XX devices. The register map for CN66XX is the same
+ * for most registers. This file has the other registers that are
+ * 68XX-specific.
+ */
+
+#ifndef __CN68XX_REGS_H__
+#define __CN68XX_REGS_H__
+#include "cn66xx_regs.h"
+
+/*###################### REQUEST QUEUE #########################*/
+
+#define CN68XX_SLI_IQ_PORT0_PKIND 0x0800
+
+#define CN68XX_SLI_IQ_PORT_PKIND(iq) \
+ (CN68XX_SLI_IQ_PORT0_PKIND + ((iq) * CN6XXX_IQ_OFFSET))
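+
+/* Per-queue expansion (illustrative): the PKIND CSRs are spaced by
+ * CN6XXX_IQ_OFFSET (inherited from cn66xx_regs.h), e.g.
+ *
+ *   CN68XX_SLI_IQ_PORT_PKIND(2) == 0x0800 + 2 * CN6XXX_IQ_OFFSET
+ */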
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* Starting pipe number and number of pipes used by the SLI packet output. */
+#define CN68XX_SLI_TX_PIPE 0x1230
+
+/*######################## INTERRUPTS #########################*/
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN68XX_INTR_PIPE_ERR BIT_ULL(61)
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
new file mode 100644
index 000000000000..160f8077692c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -0,0 +1,1216 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/ethtool.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+struct oct_mdio_cmd_context {
+ int octeon_id;
+ wait_queue_head_t wc;
+ int cond;
+};
+
+struct oct_mdio_cmd_resp {
+ u64 rh;
+ struct oct_mdio_cmd resp;
+ u64 status;
+};
+
+#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
+
+/* Octeon's interface mode of operation */
+enum {
+ INTERFACE_MODE_DISABLED,
+ INTERFACE_MODE_RGMII,
+ INTERFACE_MODE_GMII,
+ INTERFACE_MODE_SPI,
+ INTERFACE_MODE_PCIE,
+ INTERFACE_MODE_XAUI,
+ INTERFACE_MODE_SGMII,
+ INTERFACE_MODE_PICMG,
+ INTERFACE_MODE_NPI,
+ INTERFACE_MODE_LOOP,
+ INTERFACE_MODE_SRIO,
+ INTERFACE_MODE_ILK,
+ INTERFACE_MODE_RXAUI,
+ INTERFACE_MODE_QSGMII,
+ INTERFACE_MODE_AGL,
+};
+
+#define OCT_ETHTOOL_REGDUMP_LEN 4096
+#define OCT_ETHTOOL_REGSVER 1
+
+static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
+ "Instr posted",
+ "Instr processed",
+ "Instr dropped",
+ "Bytes Sent",
+ "Sgentry_sent",
+ "Inst cntreg",
+ "Tx done",
+ "Tx Iq busy",
+ "Tx dropped",
+ "Tx bytes",
+};
+
+static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
+ "OQ Pkts Received",
+ "OQ Bytes Received",
+ "Dropped no dispatch",
+ "Dropped nomem",
+ "Dropped toomany",
+ "Stack RX cnt",
+ "Stack RX Bytes",
+ "RX dropped",
+};
+
+#define OCTNIC_NCMD_AUTONEG_ON 0x1
+#define OCTNIC_NCMD_PHY_ON 0x2
+
+static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_link_info *linfo;
+
+ linfo = &lio->linfo;
+
+ if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
+ linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
+ ecmd->port = PORT_FIBRE;
+ ecmd->supported =
+ (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
+ SUPPORTED_Pause);
+ ecmd->advertising =
+ (ADVERTISED_10000baseT_Full | ADVERTISED_Pause);
+ ecmd->transceiver = XCVR_EXTERNAL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ } else {
+ dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
+ }
+
+ if (linfo->link.s.status) {
+ ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
+ ecmd->duplex = linfo->link.s.duplex;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static void
+lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct lio *lio;
+ struct octeon_device *oct;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
+ strcpy(drvinfo->driver, "liquidio");
+ strcpy(drvinfo->version, LIQUIDIO_VERSION);
+ strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
+ ETHTOOL_FWVERS_LEN);
+ strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
+ drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN;
+}
+
+static void
+lio_ethtool_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+ u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
+
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+ max_rx = CFG_GET_OQ_MAX_Q(conf6x);
+ max_tx = CFG_GET_IQ_MAX_Q(conf6x);
+ rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
+ tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
+ }
+
+ channel->max_rx = max_rx;
+ channel->max_tx = max_tx;
+ channel->rx_count = rx_count;
+ channel->tx_count = tx_count;
+}
+
+static int lio_get_eeprom_len(struct net_device *netdev)
+{
+ u8 buf[128];
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_board_info *board_info;
+ int len;
+
+ board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
+ len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
+ board_info->name, board_info->serial_number,
+ board_info->major, board_info->minor);
+
+ return len;
+}
+
+static int
+lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_board_info *board_info;
+
+ if (eeprom->offset != 0)
+ return -EINVAL;
+
+ eeprom->magic = oct_dev->pci_dev->vendor;
+ board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
+ sprintf((char *)bytes,
+ "boardname:%s serialnum:%s maj:%lld min:%lld\n",
+ board_info->name, board_info->serial_number,
+ board_info->major, board_info->minor);
+
+ return 0;
+}
+
+static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = addr;
+ nctrl.ncmd.s.param3 = val;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Callback for when the MDIO command response arrives. */
+static void octnet_mdio_resp_callback(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct oct_mdio_cmd_resp *mdio_cmd_rsp;
+ struct oct_mdio_cmd_context *mdio_cmd_ctx;
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+
+ mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
+ mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
+
+ oct = lio_get_device(mdio_cmd_ctx->octeon_id);
+ if (status) {
+ dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
+ } else {
+ ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
+ }
+ wake_up_interruptible(&mdio_cmd_ctx->wc);
+}
+
+/* This routine provides PHY access for MDIO clause 45. */
+static int
+octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_mdio_cmd_resp *mdio_cmd_rsp;
+ struct oct_mdio_cmd_context *mdio_cmd_ctx;
+ struct oct_mdio_cmd *mdio_cmd;
+ int retval = 0;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ sizeof(struct oct_mdio_cmd),
+ sizeof(struct oct_mdio_cmd_resp),
+ sizeof(struct oct_mdio_cmd_context));
+
+ if (!sc)
+ return -ENOMEM;
+
+ mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
+ mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
+ mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
+
+ ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
+ mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
+ mdio_cmd->op = op;
+ mdio_cmd->mdio_addr = loc;
+ if (op)
+ mdio_cmd->value1 = *value;
+ mdio_cmd->value2 = lio->linfo.ifidx;
+ octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
+ 0, 0, 0);
+
+ sc->wait_time = 1000;
+ sc->callback = octnet_mdio_resp_callback;
+ sc->callback_arg = sc;
+
+ init_waitqueue_head(&mdio_cmd_ctx->wc);
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev,
+ "octnet_mdio45_access instruction failed status: %x\n",
+ retval);
+ retval = -EBUSY;
+ } else {
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived
+ */
+ sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
+ retval = mdio_cmd_rsp->status;
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
+ retval = -EBUSY;
+ } else {
+ octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
+ sizeof(struct oct_mdio_cmd) / 8);
+
+ if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
+ if (!op)
+ *value = mdio_cmd_rsp->resp.value1;
+ } else {
+ retval = -EINVAL;
+ }
+ }
+ }
+
+ octeon_free_soft_command(oct_dev, sc);
+
+ return retval;
+}
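+
+/* Usage sketch: op == 0 performs a clause-45 read and returns the register
+ * contents through *value; op == 1 performs a write with *value holding the
+ * data. lio_set_phys_id() below uses both to save, program, and restore the
+ * LED registers.
+ */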
+
+static int lio_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ int value, ret;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_DRIVEON);
+ return 2;
+
+ } else if (oct->chip_id == OCTEON_CN68XX) {
+ /* Save the current LED settings */
+ ret = octnet_mdio45_access(lio, 0,
+ LIO68XX_LED_BEACON_ADDR,
+ &lio->phy_beacon_val);
+ if (ret)
+ return ret;
+
+ ret = octnet_mdio45_access(lio, 0,
+ LIO68XX_LED_CTRL_ADDR,
+ &lio->led_ctrl_val);
+ if (ret)
+ return ret;
+
+ /* Configure Beacon values */
+ value = LIO68XX_LED_BEACON_CFGON;
+ ret =
+ octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_BEACON_ADDR,
+ &value);
+ if (ret)
+ return ret;
+
+ value = LIO68XX_LED_CTRL_CFGON;
+ ret =
+ octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_CTRL_ADDR,
+ &value);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ case ETHTOOL_ID_ON:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_HIGH);
+
+ } else if (oct->chip_id == OCTEON_CN68XX) {
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ case ETHTOOL_ID_OFF:
+ if (oct->chip_id == OCTEON_CN66XX)
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_LOW);
+ else if (oct->chip_id == OCTEON_CN68XX)
+ return -EINVAL;
+ else
+ return -EINVAL;
+
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_DRIVEOFF);
+ } else if (oct->chip_id == OCTEON_CN68XX) {
+ /* Restore LED settings */
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_CTRL_ADDR,
+ &lio->led_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_BEACON_ADDR,
+ &lio->phy_beacon_val);
+ if (ret)
+ return ret;
+
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+lio_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
+ rx_pending = 0;
+
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+ tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
+ rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
+ rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
+ tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
+ }
+
+ if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
+ ering->rx_pending = 0;
+ ering->rx_max_pending = 0;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = rx_pending;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = rx_max_pending;
+ } else {
+ ering->rx_pending = rx_pending;
+ ering->rx_max_pending = rx_max_pending;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ }
+
+ ering->tx_pending = tx_pending;
+ ering->tx_max_pending = tx_max_pending;
+}
+
+static u32 lio_get_msglevel(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ return lio->msg_enable;
+}
+
+static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
+ if (msglvl & NETIF_MSG_HW)
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_ENABLE);
+ else
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_DISABLE);
+ }
+
+ lio->msg_enable = msglvl;
+}
+
+static void
+lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ /* Note: autonegotiation is not supported in these drivers;
+ * just report pause frame support.
+ */
+ pause->tx_pause = 1;
+ pause->rx_pause = 1; /* TODO: RX pause frames still need support. */
+}
+
+static void
+lio_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int i = 0, j;
+
+ for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) {
+ if (!(oct_dev->io_qmask.iq & (1UL << j)))
+ continue;
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
+ data[i++] =
+ CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_processed);
+ data[i++] =
+ CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_dropped);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
+ data[i++] =
+ readl(oct_dev->instr_queue[j]->inst_cnt_reg);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ }
+
+ /* for (j = 0; j < oct_dev->num_oqs; j++){ */
+ for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) {
+ if (!(oct_dev->io_qmask.oq & (1UL << j)))
+ continue;
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
+ }
+}
+
+static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int num_iq_stats, num_oq_stats, i, j;
+
+ num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct_dev->io_qmask.iq & (1UL << i)))
+ continue;
+ for (j = 0; j < num_iq_stats; j++) {
+ sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
+ /* for (i = 0; i < oct_dev->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct_dev->io_qmask.oq & (1UL << i)))
+ continue;
+ for (j = 0; j < num_oq_stats; j++) {
+ sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static int lio_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) +
+ (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+}
+
+static int lio_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *intr_coal)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ struct octeon_instr_queue *iq;
+ struct oct_intrmod_cfg *intrmod_cfg;
+
+ intrmod_cfg = &oct->intrmod;
+
+ switch (oct->chip_id) {
+ /* case OCTEON_CN73XX: Todo */
+ /* break; */
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ if (!intrmod_cfg->intrmod_enable) {
+ intr_coal->rx_coalesce_usecs =
+ CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
+ intr_coal->rx_max_coalesced_frames =
+ CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
+ } else {
+ intr_coal->use_adaptive_rx_coalesce =
+ intrmod_cfg->intrmod_enable;
+ intr_coal->rate_sample_interval =
+ intrmod_cfg->intrmod_check_intrvl;
+ intr_coal->pkt_rate_high =
+ intrmod_cfg->intrmod_maxpkt_ratethr;
+ intr_coal->pkt_rate_low =
+ intrmod_cfg->intrmod_minpkt_ratethr;
+ intr_coal->rx_max_coalesced_frames_high =
+ intrmod_cfg->intrmod_maxcnt_trigger;
+ intr_coal->rx_coalesce_usecs_high =
+ intrmod_cfg->intrmod_maxtmr_trigger;
+ intr_coal->rx_coalesce_usecs_low =
+ intrmod_cfg->intrmod_mintmr_trigger;
+ intr_coal->rx_max_coalesced_frames_low =
+ intrmod_cfg->intrmod_mincnt_trigger;
+ }
+
+ iq = oct->instr_queue[lio->linfo.txpciq[0]];
+ intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
+ break;
+
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Callback function for intrmod */
+static void octnet_intrmod_callback(struct octeon_device *oct_dev,
+ u32 status,
+ void *ptr)
+{
+ struct oct_intrmod_cmd *cmd = ptr;
+ struct octeon_soft_command *sc = cmd->sc;
+
+ oct_dev = cmd->oct_dev;
+
+ if (status)
+ dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n",
+ CVM_CAST64(status));
+ else
+ dev_info(&oct_dev->pci_dev->dev,
+ "Rx-Adaptive Interrupt moderation enabled:%llx\n",
+ oct_dev->intrmod.intrmod_enable);
+
+ octeon_free_soft_command(oct_dev, sc);
+}
+
+/* Configure interrupt moderation parameters */
+static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
+{
+ struct octeon_soft_command *sc;
+ struct oct_intrmod_cmd *cmd;
+ struct oct_intrmod_cfg *cfg;
+ int retval;
+ struct octeon_device *oct_dev = (struct octeon_device *)oct;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ sizeof(struct oct_intrmod_cfg),
+ 0,
+ sizeof(struct oct_intrmod_cmd));
+
+ if (!sc)
+ return -ENOMEM;
+
+ cmd = (struct oct_intrmod_cmd *)sc->ctxptr;
+ cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
+
+ memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
+ octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
+ cmd->sc = sc;
+ cmd->cfg = cfg;
+ cmd->oct_dev = oct_dev;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
+
+ sc->callback = octnet_intrmod_callback;
+ sc->callback_arg = cmd;
+ sc->wait_time = 1000;
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Enable/disable adaptive (auto) interrupt moderation */
+static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
+ *intr_coal, int adaptive)
+{
+ int ret = 0;
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_intrmod_cfg *intrmod_cfg;
+
+ intrmod_cfg = &oct->intrmod;
+
+ if (adaptive) {
+ if (intr_coal->rate_sample_interval)
+ intrmod_cfg->intrmod_check_intrvl =
+ intr_coal->rate_sample_interval;
+ else
+ intrmod_cfg->intrmod_check_intrvl =
+ LIO_INTRMOD_CHECK_INTERVAL;
+
+ if (intr_coal->pkt_rate_high)
+ intrmod_cfg->intrmod_maxpkt_ratethr =
+ intr_coal->pkt_rate_high;
+ else
+ intrmod_cfg->intrmod_maxpkt_ratethr =
+ LIO_INTRMOD_MAXPKT_RATETHR;
+
+ if (intr_coal->pkt_rate_low)
+ intrmod_cfg->intrmod_minpkt_ratethr =
+ intr_coal->pkt_rate_low;
+ else
+ intrmod_cfg->intrmod_minpkt_ratethr =
+ LIO_INTRMOD_MINPKT_RATETHR;
+
+ if (intr_coal->rx_max_coalesced_frames_high)
+ intrmod_cfg->intrmod_maxcnt_trigger =
+ intr_coal->rx_max_coalesced_frames_high;
+ else
+ intrmod_cfg->intrmod_maxcnt_trigger =
+ LIO_INTRMOD_MAXCNT_TRIGGER;
+
+ if (intr_coal->rx_coalesce_usecs_high)
+ intrmod_cfg->intrmod_maxtmr_trigger =
+ intr_coal->rx_coalesce_usecs_high;
+ else
+ intrmod_cfg->intrmod_maxtmr_trigger =
+ LIO_INTRMOD_MAXTMR_TRIGGER;
+
+ if (intr_coal->rx_coalesce_usecs_low)
+ intrmod_cfg->intrmod_mintmr_trigger =
+ intr_coal->rx_coalesce_usecs_low;
+ else
+ intrmod_cfg->intrmod_mintmr_trigger =
+ LIO_INTRMOD_MINTMR_TRIGGER;
+
+ if (intr_coal->rx_max_coalesced_frames_low)
+ intrmod_cfg->intrmod_mincnt_trigger =
+ intr_coal->rx_max_coalesced_frames_low;
+ else
+ intrmod_cfg->intrmod_mincnt_trigger =
+ LIO_INTRMOD_MINCNT_TRIGGER;
+ }
+
+ intrmod_cfg->intrmod_enable = adaptive;
+ ret = octnet_set_intrmod_cfg(oct, intrmod_cfg);
+
+ return ret;
+}
+
+static int
+oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
+{
+ int ret;
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ u32 rx_max_coalesced_frames;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
+ else
+ rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames;
+
+ /* Disable adaptive interrupt moderation */
+ ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
+ if (ret)
+ return ret;
+
+ /* Configure count-based interrupt values */
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+ rx_max_coalesced_frames);
+ CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
+ return 0;
+}
+
+static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
+ *intr_coal)
+{
+ int ret;
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ u32 time_threshold, rx_coalesce_usecs;
+
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+
+ /* Disable adaptive interrupt moderation */
+ ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
+ if (ret)
+ return ret;
+
+ /* Configure time-based interrupt values */
+ time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs);
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
+ CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
+
+ return 0;
+}
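+
+/* Usage sketch (hypothetical values): "ethtool -C ethX rx-usecs 64
+ * rx-frames 32" lands in lio_set_intr_coalesce() below, which applies the
+ * time threshold via oct_cfg_rx_intrtime() and the packet-count threshold
+ * via oct_cfg_rx_intrcnt().
+ */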
+
+static int lio_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *intr_coal)
+{
+ struct lio *lio = GET_LIO(netdev);
+ int ret;
+ struct octeon_device *oct = lio->oct_dev;
+ u32 j, q_no;
+
+ if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) &&
+ (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) {
+ for (j = 0; j < lio->linfo.num_txpciq; j++) {
+ q_no = lio->linfo.txpciq[j];
+ oct->instr_queue[q_no]->fill_threshold =
+ intr_coal->tx_max_coalesced_frames;
+ }
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
+ intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN,
+ CN6XXX_DB_MAX);
+ return -EINVAL;
+ }
+
+ /* User requested adaptive-rx on */
+ if (intr_coal->use_adaptive_rx_coalesce) {
+ ret = oct_cfg_adaptive_intr(lio, intr_coal, 1);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+ /* User requested adaptive-rx off and rx coalesce */
+ if ((intr_coal->rx_coalesce_usecs) &&
+ (!intr_coal->use_adaptive_rx_coalesce)) {
+ ret = oct_cfg_rx_intrtime(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+ /* User requested adaptive-rx off and rx max coalesced frames */
+ if ((intr_coal->rx_max_coalesced_frames) &&
+ (!intr_coal->use_adaptive_rx_coalesce)) {
+ ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+ /* User requested adaptive-rx off, so use default coalesce params */
+ if ((!intr_coal->rx_max_coalesced_frames) &&
+ (!intr_coal->use_adaptive_rx_coalesce) &&
+ (!intr_coal->rx_coalesce_usecs)) {
+ dev_info(&oct->pci_dev->dev,
+ "Turning off adaptive-rx interrupt moderation\n");
+ dev_info(&oct->pci_dev->dev,
+ "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n",
+ CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT);
+ ret = oct_cfg_rx_intrtime(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+
+ ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+ }
+
+ return 0;
+ret_intrmod:
+ return ret;
+}
+
+static int lio_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (lio->ptp_clock)
+ info->phc_index = ptp_clock_index(lio->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ return 0;
+}
+
+static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_link_info *linfo;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int ret = 0;
+
+ /* get the link info */
+ linfo = &lio->linfo;
+
+ if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 &&
+ ecmd->speed != SPEED_10) ||
+ (ecmd->duplex != DUPLEX_HALF &&
+ ecmd->duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ /* Ethtool support is not provided for XAUI and RXAUI interfaces,
+ * as they operate at fixed speed and duplex settings.
+ */
+ if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
+ linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
+ dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n");
+ return -EINVAL;
+ }
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
+ nctrl.wait_time = 1000;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ /* Pass the parameters sent by ethtool (speed, autoneg, duplex)
+ * to the SE core application using ncmd.s.more & ncmd.s.param.
+ */
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ /* Autoneg ON */
+ nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
+ OCTNIC_NCMD_AUTONEG_ON;
+ nctrl.ncmd.s.param2 = ecmd->advertising;
+ } else {
+ /* Autoneg OFF */
+ nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
+
+ nctrl.ncmd.s.param3 = ecmd->duplex;
+
+ nctrl.ncmd.s.param2 = ecmd->speed;
+ }
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int lio_nway_reset(struct net_device *netdev)
+{
+ if (netif_running(netdev)) {
+ struct ethtool_cmd ecmd;
+
+ memset(&ecmd, 0, sizeof(struct ethtool_cmd));
+ ecmd.autoneg = 0;
+ ecmd.speed = 0;
+ ecmd.duplex = 0;
+ lio_set_settings(netdev, &ecmd);
+ }
+ return 0;
+}
+
+/* Return register dump len. */
+static int lio_get_regs_len(struct net_device *dev)
+{
+ return OCT_ETHTOOL_REGDUMP_LEN;
+}
+
+static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
+{
+ u32 reg;
+ int i, len = 0;
+
+ /* PCI Window Registers */
+
+ len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
+ reg = CN6XXX_WIN_WR_ADDR_LO;
+ len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
+ CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_ADDR_HI;
+ len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
+ CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_RD_ADDR_LO;
+ len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
+ CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_RD_ADDR_HI;
+ len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
+ CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_DATA_LO;
+ len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
+ CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_DATA_HI;
+ len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
+ CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
+ len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
+ CN6XXX_WIN_WR_MASK_REG,
+ octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
+
+ /* PCI Interrupt Register */
+ len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
+ CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
+ CN6XXX_SLI_INT_ENB64_PORT0));
+ len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
+ CN6XXX_SLI_INT_ENB64_PORT1,
+ octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
+ len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
+ octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
+
+ /* PCI Output queue registers */
+ for (i = 0; i < oct->num_oqs; i++) {
+ reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
+ len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
+ len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ }
+ reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
+ len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
+ reg, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
+ len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
+ reg, octeon_read_csr(oct, reg));
+
+ /* PCI Input queue registers */
+ for (i = 0; i <= 3; i++) {
+ reg = CN6XXX_SLI_IQ_DOORBELL(i);
+ len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
+ len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ }
+
+ /* PCI DMA registers */
+
+ len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
+ CN6XXX_DMA_CNT(0),
+ octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
+ reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
+ len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
+ CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
+ reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
+ len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
+ CN6XXX_DMA_TIME_INT_LEVEL(0),
+ octeon_read_csr(oct, reg));
+
+ len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
+ CN6XXX_DMA_CNT(1),
+ octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
+ reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
+ len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
+ CN6XXX_DMA_PKT_INT_LEVEL(1),
+ octeon_read_csr(oct, reg));
+ reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
+ len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
+ CN6XXX_DMA_TIME_INT_LEVEL(1),
+ octeon_read_csr(oct, reg));
+
+ /* PCI Index registers */
+
+ len += sprintf(s + len, "\n");
+
+ for (i = 0; i < 16; i++) {
+ reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
+ len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
+ CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
+ }
+
+ return len;
+}
+
+static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
+{
+ u32 val;
+ int i, len = 0;
+
+ /* PCI CONFIG Registers */
+
+ len += sprintf(s + len,
+ "\n\t Octeon Config space Registers\n\n");
+
+ for (i = 0; i <= 13; i++) {
+ pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+ len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+ (i * 4), i, val);
+ }
+
+ for (i = 30; i <= 34; i++) {
+ pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+ len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+ (i * 4), i, val);
+ }
+
+ return len;
+}
+
+/* Return the register dump to the user app. */
+static void lio_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *regbuf)
+{
+ struct lio *lio = GET_LIO(dev);
+ int len = 0;
+ struct octeon_device *oct = lio->oct_dev;
+
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
+ regs->version = OCT_ETHTOOL_REGSVER;
+
+ switch (oct->chip_id) {
+ /* case OCTEON_CN73XX: Todo */
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ len += cn6xxx_read_csr_reg(regbuf + len, oct);
+ len += cn6xxx_read_config_reg(regbuf + len, oct);
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
+ __func__, oct->chip_id);
+ }
+}
+
+static const struct ethtool_ops lio_ethtool_ops = {
+ .get_settings = lio_get_settings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = lio_get_drvinfo,
+ .get_ringparam = lio_ethtool_get_ringparam,
+ .get_channels = lio_ethtool_get_channels,
+ .set_phys_id = lio_set_phys_id,
+ .get_eeprom_len = lio_get_eeprom_len,
+ .get_eeprom = lio_get_eeprom,
+ .get_strings = lio_get_strings,
+ .get_ethtool_stats = lio_get_ethtool_stats,
+ .get_pauseparam = lio_get_pauseparam,
+ .get_regs_len = lio_get_regs_len,
+ .get_regs = lio_get_regs,
+ .get_msglevel = lio_get_msglevel,
+ .set_msglevel = lio_set_msglevel,
+ .get_sset_count = lio_get_sset_count,
+ .nway_reset = lio_nway_reset,
+ .set_settings = lio_set_settings,
+ .get_coalesce = lio_get_intr_coalesce,
+ .set_coalesce = lio_set_intr_coalesce,
+ .get_ts_info = lio_get_ts_info,
+};
+
+void liquidio_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &lio_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
new file mode 100644
index 000000000000..0660deecc2c9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -0,0 +1,3668 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/net_tstamp.h>
+#include <linux/if_vlan.h>
+#include <linux/firmware.h>
+#include <linux/ethtool.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LIQUIDIO_VERSION);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
+
+static int ddr_timeout = 10000;
+module_param(ddr_timeout, int, 0644);
+MODULE_PARM_DESC(ddr_timeout,
+ "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
+
+static u32 console_bitmask;
+module_param(console_bitmask, int, 0644);
+MODULE_PARM_DESC(console_bitmask,
+ "Bitmask indicating which consoles have debug output redirected to syslog.");
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
+
+static char fw_type[LIO_MAX_FW_TYPE_LEN];
+module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
+MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");
+
+static int conf_type;
+module_param(conf_type, int, 0);
+MODULE_PARM_DESC(conf_type, "Octeon configuration to select: 0 = default, 1 = OVS");
+
+/* Bit mask values for lio->ifstate */
+#define LIO_IFSTATE_DROQ_OPS 0x01
+#define LIO_IFSTATE_REGISTERED 0x02
+#define LIO_IFSTATE_RUNNING 0x04
+#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
+
+/* Polling interval for determining when NIC application is alive */
+#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
+
+/* runtime link query interval */
+#define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
+
+struct liquidio_if_cfg_context {
+ int octeon_id;
+
+ wait_queue_head_t wc;
+
+ int cond;
+};
+
+struct liquidio_if_cfg_resp {
+ u64 rh;
+ struct liquidio_if_cfg_info cfg_info;
+ u64 status;
+};
+
+struct oct_link_status_resp {
+ u64 rh;
+ struct oct_link_info link_info;
+ u64 status;
+};
+
+struct oct_timestamp_resp {
+ u64 rh;
+ u64 timestamp;
+ u64 status;
+};
+
+#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
+
+union tx_info {
+ u64 u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u16 gso_size;
+ u16 gso_segs;
+ u32 reserved;
+#else
+ u32 reserved;
+ u16 gso_segs;
+ u16 gso_size;
+#endif
+ } s;
+};
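+
+/* Layout note (illustrative): the two field orders mirror each other so
+ * that gso_size always lands in the most significant 16 bits when the
+ * union is viewed as a u64, e.g.
+ *
+ *   union tx_info ti = { .s = { .gso_size = 1448, .gso_segs = 4 } };
+ *   u16 sz = ti.u64 >> 48;   on either endianness, sz == 1448
+ */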
+
+/** Octeon device properties to be used by the NIC module.
+ * Each octeon device in the system will be represented
+ * by this structure in the NIC module.
+ */
+
+#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
+
+#define OCTNIC_GSO_MAX_HEADER_SIZE 128
+#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
+
+/** Structure of a node in list of gather components maintained by
+ * NIC driver for each network device.
+ */
+struct octnic_gather {
+ /** List manipulation. Next and prev pointers. */
+ struct list_head list;
+
+ /** Size of the gather component at sg in bytes. */
+ int sg_size;
+
+ /** Number of bytes that sg was adjusted to make it 8B-aligned. */
+ int adjust;
+
+ /** Gather component that can accommodate max sized fragment list
+ * received from the IP layer.
+ */
+ struct octeon_sg_entry *sg;
+};
+
+/** This structure is used by NIC driver to store information required
+ * to free the sk_buff when the packet has been fetched by Octeon.
+ * Byte offsets below assume the worst case of a 64-bit system.
+ */
+struct octnet_buf_free_info {
+ /** Bytes 1-8. Pointer to network device private structure. */
+ struct lio *lio;
+
+ /** Bytes 9-16. Pointer to sk_buff. */
+ struct sk_buff *skb;
+
+ /** Bytes 17-24. Pointer to gather list. */
+ struct octnic_gather *g;
+
+ /** Bytes 25-32. Physical address of skb->data or gather list. */
+ u64 dptr;
+
+ /** Bytes 33-47. Piggybacked soft command, if any */
+ struct octeon_soft_command *sc;
+};
+
+struct handshake {
+ struct completion init;
+ struct completion started;
+ struct pci_dev *pci_dev;
+ int init_ok;
+ int started_ok;
+};
+
+struct octeon_device_priv {
+ /** Tasklet structures for this device. */
+ struct tasklet_struct droq_tasklet;
+ unsigned long napi_mask;
+};
+
+static int octeon_device_init(struct octeon_device *);
+static void liquidio_remove(struct pci_dev *pdev);
+static int liquidio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static struct handshake handshake[MAX_OCTEON_DEVICES];
+static struct completion first_stage;
+
+static void octeon_droq_bh(unsigned long pdev)
+{
+ int q_no;
+ int reschedule = 0;
+ struct octeon_device *oct = (struct octeon_device *)pdev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
+ for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) {
+ if (!(oct->io_qmask.oq & (1UL << q_no)))
+ continue;
+ reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
+ MAX_PACKET_BUDGET);
+ }
+
+ if (reschedule)
+ tasklet_schedule(&oct_priv->droq_tasklet);
+}
+
+static int lio_wait_for_oq_pkts(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ int retry = 100, pkt_cnt = 0, pending_pkts = 0;
+ int i;
+
+ do {
+ pending_pkts = 0;
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ pkt_cnt += octeon_droq_check_hw_for_pkts(oct,
+ oct->droq[i]);
+ }
+ if (pkt_cnt > 0) {
+ pending_pkts += pkt_cnt;
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ }
+ pkt_cnt = 0;
+ schedule_timeout_uninterruptible(1);
+
+ } while (retry-- && pending_pkts);
+
+ return pkt_cnt;
+}
+
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+ unsigned int bytes_compl)
+{
+ struct netdev_queue *netdev_queue = txq;
+
+ netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
+}
+
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb = NULL;
+ struct octeon_soft_command *sc;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ (*pkts_compl)++;
+ *bytes_compl += skb->len;
+}
+
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct octeon_soft_command *sc;
+ struct netdev_queue *txq;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
+ netdev_tx_sent_queue(txq, skb->len);
+}
+
+int octeon_console_debug_enabled(u32 console)
+{
+ return (console_bitmask >> (console)) & 0x1;
+}
+
+/**
+ * \brief Forces all IO queues off on a given device
+ * @param oct Pointer to Octeon device
+ */
+static void force_io_queues_off(struct octeon_device *oct)
+{
+ if ((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX)) {
+ /* Reset the Enable bits for Input Queues. */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+
+ /* Reset the Enable bits for Output Queues. */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ }
+}
+
+/**
+ * \brief wait for all pending requests to complete
+ * @param oct Pointer to Octeon device
+ *
+ * Called during shutdown sequence
+ */
+static int wait_for_pending_requests(struct octeon_device *oct)
+{
+ int i, pcount = 0;
+
+ for (i = 0; i < 100; i++) {
+ pcount =
+ atomic_read(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].pending_req_count);
+ if (pcount)
+ schedule_timeout_uninterruptible(HZ / 10);
+ else
+ break;
+ }
+
+ if (pcount)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * \brief Cause device to go quiet so it can be safely removed/reset/etc
+ * @param oct Pointer to Octeon device
+ */
+static inline void pcierror_quiesce_device(struct octeon_device *oct)
+{
+ int i;
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet processing
+ * to finish.
+ */
+ force_io_queues_off(oct);
+
+ /* To allow for in-flight requests */
+ schedule_timeout_uninterruptible(100);
+
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ /* Force all requests waiting to be fetched by OCTEON to complete. */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ struct octeon_instr_queue *iq;
+
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ iq = oct->instr_queue[i];
+
+ if (atomic_read(&iq->instr_pending)) {
+ spin_lock_bh(&iq->lock);
+ iq->fill_cnt = 0;
+ iq->octeon_read_index = iq->host_write_index;
+ iq->stats.instr_processed +=
+ atomic_read(&iq->instr_pending);
+ lio_process_iq_request_list(oct, iq);
+ spin_unlock_bh(&iq->lock);
+ }
+ }
+
+ /* Force all pending ordered list requests to time out. */
+ lio_process_ordered_list(oct, 1);
+
+ /* We do not need to wait for output queue packets to be processed. */
+}
+
+/**
+ * \brief Cleanup PCI AER uncorrectable error status
+ * @param dev Pointer to PCI device
+ */
+static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+{
+ int pos = 0x100;
+ u32 status, mask;
+
+ pr_info("%s :\n", __func__);
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
+ if (dev->error_state == pci_channel_io_normal)
+ status &= ~mask; /* Clear corresponding nonfatal bits */
+ else
+ status &= mask; /* Clear corresponding fatal bits */
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+}
+
+/**
+ * \brief Stop all PCI IO to a given device
+ * @param dev Pointer to Octeon device
+ */
+static void stop_pci_io(struct octeon_device *oct)
+{
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ pci_disable_device(oct->pci_dev);
+
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct->chip);
+
+ pcierror_quiesce_device(oct);
+
+ /* Release the interrupt line */
+ free_irq(oct->pci_dev->irq, oct);
+
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ /* Common AER cleanup for all OCTEON models. */
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+}
+
+/**
+ * \brief called when PCI error is detected
+ * @param pdev Pointer to PCI device
+ * @param state The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct octeon_device *oct = pci_get_drvdata(pdev);
+
+ /* Non-correctable Non-fatal errors */
+ if (state == pci_channel_io_normal) {
+ dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ }
+
+ /* Non-correctable Fatal errors */
+ dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
+ stop_pci_io(oct);
+
+ /* Always return DISCONNECT; recovery is not supported, only a
+ * clean shutdown.
+ */
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * \brief mmio handler
+ * @param pdev Pointer to PCI device
+ */
+static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
+{
+ /* We should never hit this since we never ask for a reset for a Fatal
+ * Error. We always return DISCONNECT in liquidio_pcie_error_detected above.
+ * But play safe and return RECOVERED for now.
+ */
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * \brief called after the pci bus has been reset.
+ * @param pdev Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the octeon_resume routine.
+ */
+static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
+{
+ /* We should never hit this since we never ask for a reset for a Fatal
+ * Error. We always return DISCONNECT in liquidio_pcie_error_detected above.
+ * But play safe and return RECOVERED for now.
+ */
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * \brief called when traffic can start flowing again.
+ * @param pdev Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation. Implementation resembles the
+ * second-half of the octeon_resume routine.
+ */
+static void liquidio_pcie_resume(struct pci_dev *pdev)
+{
+ /* Nothing to be done here. */
+}
+
+#ifdef CONFIG_PM
+/**
+ * \brief called when suspending
+ * @param pdev Pointer to PCI device
+ * @param state state to suspend to
+ */
+static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+/**
+ * \brief called when resuming
+ * @param pdev Pointer to PCI device
+ */
+static int liquidio_resume(struct pci_dev *pdev)
+{
+ return 0;
+}
+#endif
+
+/* For PCI-E Advanced Error Recovery (AER) Interface */
+static struct pci_error_handlers liquidio_err_handler = {
+ .error_detected = liquidio_pcie_error_detected,
+ .mmio_enabled = liquidio_pcie_mmio_enabled,
+ .slot_reset = liquidio_pcie_slot_reset,
+ .resume = liquidio_pcie_resume,
+};
+
+static const struct pci_device_id liquidio_pci_tbl[] = {
+ { /* 68xx */
+ PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
+ { /* 66xx */
+ PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0
+ }
+};
+MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
+
+static struct pci_driver liquidio_pci_driver = {
+ .name = "LiquidIO",
+ .id_table = liquidio_pci_tbl,
+ .probe = liquidio_probe,
+ .remove = liquidio_remove,
+ .err_handler = &liquidio_err_handler, /* For AER */
+
+#ifdef CONFIG_PM
+ .suspend = liquidio_suspend,
+ .resume = liquidio_resume,
+#endif
+
+};
+
+/**
+ * \brief register PCI driver
+ */
+static int liquidio_init_pci(void)
+{
+ return pci_register_driver(&liquidio_pci_driver);
+}
+
+/**
+ * \brief unregister PCI driver
+ */
+static void liquidio_deinit_pci(void)
+{
+ pci_unregister_driver(&liquidio_pci_driver);
+}
+
+/**
+ * \brief check interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to check
+ */
+static inline int ifstate_check(struct lio *lio, int state_flag)
+{
+ return atomic_read(&lio->ifstate) & state_flag;
+}
+
+/**
+ * \brief set interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to set
+ */
+static inline void ifstate_set(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
+}
+
+/**
+ * \brief clear interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to clear
+ */
+static inline void ifstate_reset(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
+}
+
+/**
+ * \brief Stop Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_stop(struct net_device *netdev)
+{
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++)
+ netif_stop_subqueue(netdev, i);
+ } else {
+ netif_stop_queue(netdev);
+ }
+}
+
+/**
+ * \brief Start Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_start(struct net_device *netdev)
+{
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++)
+ netif_start_subqueue(netdev, i);
+ } else {
+ netif_start_queue(netdev);
+ }
+}
+
+/**
+ * \brief Wake Tx queues
+ * @param netdev network device
+ */
+static inline void txqs_wake(struct net_device *netdev)
+{
+ if (netif_is_multiqueue(netdev)) {
+ int i;
+
+ for (i = 0; i < netdev->num_tx_queues; i++)
+ netif_wake_subqueue(netdev, i);
+ } else {
+ netif_wake_queue(netdev);
+ }
+}
+
+/**
+ * \brief Stop Tx queue
+ * @param netdev network device
+ */
+static void stop_txq(struct net_device *netdev)
+{
+ txqs_stop(netdev);
+}
+
+/**
+ * \brief Start Tx queue
+ * @param netdev network device
+ */
+static void start_txq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->linfo.link.s.status) {
+ txqs_start(netdev);
+ return;
+ }
+}
+
+/**
+ * \brief Wake a queue
+ * @param netdev network device
+ * @param q which queue to wake
+ */
+static inline void wake_q(struct net_device *netdev, int q)
+{
+ if (netif_is_multiqueue(netdev))
+ netif_wake_subqueue(netdev, q);
+ else
+ netif_wake_queue(netdev);
+}
+
+/**
+ * \brief Stop a queue
+ * @param netdev network device
+ * @param q which queue to stop
+ */
+static inline void stop_q(struct net_device *netdev, int q)
+{
+ if (netif_is_multiqueue(netdev))
+ netif_stop_subqueue(netdev, q);
+ else
+ netif_stop_queue(netdev);
+}
+
+/**
+ * \brief Check Tx queue status, and take appropriate action
+ * @param lio per-network private data
+ * @returns number of queues woken up, or 0 if all checked queues were full
+ */
+static inline int check_txq_status(struct lio *lio)
+{
+ int ret_val = 0;
+
+ if (netif_is_multiqueue(lio->netdev)) {
+ int numqs = lio->netdev->num_tx_queues;
+ int q, iq = 0;
+
+ /* check each sub-queue state */
+ for (q = 0; q < numqs; q++) {
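+ /* Map the netdev sub-queue to a device IQ; the mask assumes
+ * num_txpciq is a power of two.
+ */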
+ iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)];
+ if (octnet_iq_is_full(lio->oct_dev, iq))
+ continue;
+ wake_q(lio->netdev, q);
+ ret_val++;
+ }
+ } else {
+ if (octnet_iq_is_full(lio->oct_dev, lio->txq))
+ return 0;
+ wake_q(lio->netdev, lio->txq);
+ ret_val = 1;
+ }
+ return ret_val;
+}
+
+/**
+ * Remove and return the node at the head of the list, or NULL if the
+ * list is empty.
+ */
+static inline struct list_head *list_delete_head(struct list_head *root)
+{
+ struct list_head *node;
+
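+ /* A list head whose next and prev point to itself is empty; this is
+ * an open-coded list_empty(root).
+ */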
+ if ((root->prev == root) && (root->next == root))
+ node = NULL;
+ else
+ node = root->next;
+
+ if (node)
+ list_del(node);
+
+ return node;
+}
+
+/**
+ * \brief Delete gather list
+ * @param lio per-network private data
+ */
+static void delete_glist(struct lio *lio)
+{
+ struct octnic_gather *g;
+
+ do {
+ g = (struct octnic_gather *)
+ list_delete_head(&lio->glist);
+ if (g) {
+ if (g->sg)
+ kfree((void *)((unsigned long)g->sg -
+ g->adjust));
+ kfree(g);
+ }
+ } while (g);
+}
+
+/**
+ * \brief Setup gather list
+ * @param lio per-network private data
+ */
+static int setup_glist(struct lio *lio)
+{
+ int i;
+ struct octnic_gather *g;
+
+ INIT_LIST_HEAD(&lio->glist);
+
+ for (i = 0; i < lio->tx_qsize; i++) {
+ g = kmalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ break;
+ memset(g, 0, sizeof(struct octnic_gather));
+
+ g->sg_size =
+ ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
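+ /* Over-allocate by 8 bytes so the pointer can be bumped to the next
+ * 8-byte boundary below if kmalloc() returns an unaligned address.
+ */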
+ g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
+ if (!g->sg) {
+ kfree(g);
+ break;
+ }
+
+ /* The gather component should be aligned on 64-bit boundary */
+ if (((unsigned long)g->sg) & 7) {
+ g->adjust = 8 - (((unsigned long)g->sg) & 7);
+ g->sg = (struct octeon_sg_entry *)
+ ((unsigned long)g->sg + g->adjust);
+ }
+ list_add_tail(&g->list, &lio->glist);
+ }
+
+ if (i == lio->tx_qsize)
+ return 0;
+
+ delete_glist(lio);
+ return 1;
+}
+
+/**
+ * \brief Print link information
+ * @param netdev network device
+ */
+static void print_link_info(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
+ struct oct_link_info *linfo = &lio->linfo;
+
+ if (linfo->link.s.status) {
+ netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
+ linfo->link.s.speed,
+ (linfo->link.s.duplex) ? "Full" : "Half");
+ } else {
+ netif_info(lio, link, lio->netdev, "Link Down\n");
+ }
+ }
+}
+
+/**
+ * \brief Update link status
+ * @param netdev network device
+ * @param ls link status structure
+ *
+ * Called on receipt of a link status response from the core application to
+ * update each interface's link status.
+ */
+static inline void update_link_status(struct net_device *netdev,
+ union oct_link_status *ls)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
+ lio->linfo.link.u64 = ls->u64;
+
+ print_link_info(netdev);
+
+ if (lio->linfo.link.s.status) {
+ netif_carrier_on(netdev);
+ /* start_txq(netdev); */
+ txqs_wake(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ stop_txq(netdev);
+ }
+ }
+}
+
+/**
+ * \brief Droq packet processor scheduler
+ * @param oct octeon device
+ */
+static
+void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ u64 oq_no;
+ struct octeon_droq *droq;
+
+ if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
+ if (!(oct->droq_intr & (1 << oq_no)))
+ continue;
+
+ droq = oct->droq[oq_no];
+
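+ /* Queues in NAPI poll mode are drained by their napi callback;
+ * all others fall back to the common droq tasklet.
+ */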
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ oct_priv->napi_mask |= (1 << oq_no);
+ } else {
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ }
+ }
+ }
+}
+
+/**
+ * \brief Interrupt handler for octeon
+ * @param irq unused
+ * @param dev octeon device
+ */
+static
+irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ irqreturn_t ret;
+
+ /* Disable our interrupts for the duration of ISR */
+ oct->fn_list.disable_interrupt(oct->chip);
+
+ ret = oct->fn_list.process_interrupt_regs(oct);
+
+ if (ret == IRQ_HANDLED)
+ liquidio_schedule_droq_pkt_handlers(oct);
+
+ /* Re-enable our interrupts */
+ if (atomic_read(&oct->status) != OCT_DEV_IN_RESET)
+ oct->fn_list.enable_interrupt(oct->chip);
+
+ return ret;
+}
+
+/**
+ * \brief Setup interrupt for octeon device
+ * @param oct octeon device
+ *
+ * Enable interrupt in Octeon device as given in the PCI interrupt mask.
+ */
+static int octeon_setup_interrupt(struct octeon_device *oct)
+{
+ int irqret, err;
+
+ err = pci_enable_msi(oct->pci_dev);
+ if (err)
+ dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+ err);
+ else
+ oct->flags |= LIO_FLAG_MSI_ENABLED;
+
+ irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler,
+ IRQF_SHARED, "octeon", oct);
+ if (irqret) {
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+ irqret);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * \brief PCI probe handler
+ * @param pdev PCI device structure
+ * @param ent unused
+ */
+static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octeon_device *oct_dev = NULL;
+ struct handshake *hs;
+
+ oct_dev = octeon_allocate_device(pdev->device,
+ sizeof(struct octeon_device_priv));
+ if (!oct_dev) {
+ dev_err(&pdev->dev, "Unable to allocate device\n");
+ return -ENOMEM;
+ }
+
+ dev_info(&pdev->dev, "Initializing device %x:%x.\n",
+ (u32)pdev->vendor, (u32)pdev->device);
+
+ /* Assign octeon_device for this device to the private data area. */
+ pci_set_drvdata(pdev, oct_dev);
+
+ /* set linux specific device pointer */
+ oct_dev->pci_dev = (void *)pdev;
+
+ hs = &handshake[oct_dev->octeon_id];
+ init_completion(&hs->init);
+ init_completion(&hs->started);
+ hs->pci_dev = pdev;
+
+ if (oct_dev->octeon_id == 0)
+ /* first LiquidIO NIC is detected */
+ complete(&first_stage);
+
+ if (octeon_device_init(oct_dev)) {
+ liquidio_remove(pdev);
+ return -ENOMEM;
+ }
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
+
+ return 0;
+}
+
+/**
+ * \brief Destroy resources associated with the octeon device
+ * @param oct octeon device
+ */
+static void octeon_destroy_resources(struct octeon_device *oct)
+{
+ int i;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ struct handshake *hs;
+
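+ /* Teardown runs in reverse order of initialization: each case falls
+ * through to the next, so starting at the current device state undoes
+ * exactly the steps that have completed.
+ */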
+ switch (atomic_read(&oct->status)) {
+ case OCT_DEV_RUNNING:
+ case OCT_DEV_CORE_OK:
+
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ oct->app_mode = CVM_DRV_INVALID_APP;
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ /* fallthrough */
+ case OCT_DEV_HOST_OK:
+
+ /* fallthrough */
+ case OCT_DEV_CONSOLE_INIT_DONE:
+ /* Remove any consoles */
+ octeon_remove_consoles(oct);
+
+ /* fallthrough */
+ case OCT_DEV_IO_QUEUES_DONE:
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ if (lio_wait_for_instr_fetch(oct))
+ dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet
+ * processing to finish.
+ */
+ oct->fn_list.disable_io_queues(oct);
+
+ if (lio_wait_for_oq_pkts(oct))
+ dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
+
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct->chip);
+
+ /* Release the interrupt line */
+ free_irq(oct->pci_dev->irq, oct);
+
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+
+ /* Soft reset the octeon device before exiting */
+ oct->fn_list.soft_reset(oct);
+
+ /* Disable the device, releasing the PCI INT */
+ pci_disable_device(oct->pci_dev);
+
+ /* fallthrough */
+ case OCT_DEV_IN_RESET:
+ case OCT_DEV_DROQ_INIT_DONE:
+ /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
+ mdelay(100);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ if (!(oct->io_qmask.oq & (1UL << i)))
+ continue;
+ octeon_delete_droq(oct, i);
+ }
+
+ /* Force any pending handshakes to complete */
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+
+ if (hs->pci_dev) {
+ handshake[oct->octeon_id].init_ok = 0;
+ complete(&handshake[oct->octeon_id].init);
+ handshake[oct->octeon_id].started_ok = 0;
+ complete(&handshake[oct->octeon_id].started);
+ }
+ }
+
+ /* fallthrough */
+ case OCT_DEV_RESP_LIST_INIT_DONE:
+ octeon_delete_response_list(oct);
+
+ /* fallthrough */
+ case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+ octeon_free_sc_buffer_pool(oct);
+
+ /* fallthrough */
+ case OCT_DEV_INSTR_QUEUE_INIT_DONE:
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ octeon_delete_instr_queue(oct, i);
+ }
+
+ /* fallthrough */
+ case OCT_DEV_DISPATCH_INIT_DONE:
+ octeon_delete_dispatch_list(oct);
+ cancel_delayed_work_sync(&oct->nic_poll_work.work);
+
+ /* fallthrough */
+ case OCT_DEV_PCI_MAP_DONE:
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+
+ /* fallthrough */
+ case OCT_DEV_BEGIN_STATE:
+ /* Nothing to be done here either */
+ break;
+ } /* end switch(oct->status) */
+
+ tasklet_kill(&oct_priv->droq_tasklet);
+}
+
+/**
+ * \brief Send Rx control command
+ * @param lio per-network private data
+ * @param start_stop whether to start or stop
+ */
+static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+{
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = start_stop;
+ nctrl.netpndev = (u64)lio->netdev;
+
+ nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+ if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0)
+ netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+}
+
+/**
+ * \brief Destroy NIC device interface
+ * @param oct octeon device
+ * @param ifidx which interface to destroy
+ *
+ * Cleanup associated with each interface for an Octeon device when NIC
+ * module is being unloaded or if initialization fails during load.
+ */
+static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
+{
+ struct net_device *netdev = oct->props[ifidx].netdev;
+ struct lio *lio;
+
+ if (!netdev) {
+ dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
+ __func__, ifidx);
+ return;
+ }
+
+ lio = GET_LIO(netdev);
+
+ dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
+
+ send_rx_ctrl_cmd(lio, 0);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
+ txqs_stop(netdev);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
+ unregister_netdev(netdev);
+
+ delete_glist(lio);
+
+ free_netdev(netdev);
+
+ oct->props[ifidx].netdev = NULL;
+}
+
+/**
+ * \brief Stop complete NIC functionality
+ * @param oct octeon device
+ */
+static int liquidio_stop_nic_module(struct octeon_device *oct)
+{
+ int i, j;
+ struct lio *lio;
+
+ dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
+ if (!oct->ifcount) {
+ dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
+ return 1;
+ }
+
+ for (i = 0; i < oct->ifcount; i++) {
+ lio = GET_LIO(oct->props[i].netdev);
+ for (j = 0; j < lio->linfo.num_rxpciq; j++)
+ octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]);
+ }
+
+ for (i = 0; i < oct->ifcount; i++)
+ liquidio_destroy_nic_device(oct, i);
+
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
+ return 0;
+}
+
+/**
+ * \brief Cleans up resources at unload time
+ * @param pdev PCI device structure
+ */
+static void liquidio_remove(struct pci_dev *pdev)
+{
+ struct octeon_device *oct_dev = pci_get_drvdata(pdev);
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
+
+ if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
+ liquidio_stop_nic_module(oct_dev);
+
+ /* Reset the octeon device and cleanup all memory allocated for
+ * the octeon device by driver.
+ */
+ octeon_destroy_resources(oct_dev);
+
+ dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
+
+ /* This octeon device has been removed. Update the global
+ * data structure to reflect this. Free the device structure.
+ */
+ octeon_free_device_mem(oct_dev);
+}
+
+/**
+ * \brief Identify the Octeon device and map the BAR address space
+ * @param oct octeon device
+ */
+static int octeon_chip_specific_setup(struct octeon_device *oct)
+{
+ u32 dev_id, rev_id;
+ int ret = 1;
+
+ pci_read_config_dword(oct->pci_dev, 0, &dev_id);
+ pci_read_config_dword(oct->pci_dev, 8, &rev_id);
+ oct->rev_id = rev_id & 0xff;
+
+ switch (dev_id) {
+ case OCTEON_CN68XX_PCIID:
+ oct->chip_id = OCTEON_CN68XX;
+ ret = lio_setup_cn68xx_octeon_device(oct);
+ break;
+
+ case OCTEON_CN66XX_PCIID:
+ oct->chip_id = OCTEON_CN66XX;
+ ret = lio_setup_cn66xx_octeon_device(oct);
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
+ dev_id);
+ }
+
+ if (!ret)
+ dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n",
+ OCTEON_MAJOR_REV(oct),
+ OCTEON_MINOR_REV(oct),
+ octeon_get_conf(oct)->card_name);
+
+ return ret;
+}
+
+/**
+ * \brief PCI initialization for each Octeon device.
+ * @param oct octeon device
+ */
+static int octeon_pci_os_setup(struct octeon_device *oct)
+{
+ /* setup PCI stuff first */
+ if (pci_enable_device(oct->pci_dev)) {
+ dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
+ return 1;
+ }
+
+ if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
+ dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
+ return 1;
+ }
+
+ /* Enable PCI DMA Master. */
+ pci_set_master(oct->pci_dev);
+
+ return 0;
+}
+
+/**
+ * \brief Check Tx queue state for a given network buffer
+ * @param lio per-network private data
+ * @param skb network buffer
+ */
+static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
+{
+ int q = 0, iq = 0;
+
+ if (netif_is_multiqueue(lio->netdev)) {
+ q = skb->queue_mapping;
+ iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))];
+ } else {
+ iq = lio->txq;
+ }
+
+ if (octnet_iq_is_full(lio->oct_dev, iq))
+ return 0;
+ wake_q(lio->netdev, q);
+ return 1;
+}
+
+/**
+ * \brief Unmap and free network buffer
+ * @param buf buffer
+ */
+static void free_netbuf(void *buf)
+{
+ struct sk_buff *skb;
+ struct octnet_buf_free_info *finfo;
+ struct lio *lio;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
+ DMA_TO_DEVICE);
+
+ check_txq_state(lio, skb);
+
+ recv_buffer_free((struct sk_buff *)skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer
+ * @param buf buffer
+ */
+static void free_netsgbuf(void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct lio *lio;
+ struct octnic_gather *g;
+ int i, frags;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
+ i = 1;
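+ /* Each scatter-gather entry holds four pointers; slot 0 of sg[0] was
+ * the linear part, so fragment i lives at sg[i >> 2].ptr[i & 3].
+ */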
+ while (frags--) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ pci_unmap_page((lio->oct_dev)->pci_dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ frag->size, DMA_TO_DEVICE);
+ i++;
+ }
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ finfo->dptr, g->sg_size,
+ DMA_TO_DEVICE);
+
+ spin_lock(&lio->lock);
+ list_add_tail(&g->list, &lio->glist);
+ spin_unlock(&lio->lock);
+
+ check_txq_state(lio, skb); /* mq support: sub-queue state check */
+
+ recv_buffer_free((struct sk_buff *)skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer with response
+ * @param buf buffer
+ */
+static void free_netsgbuf_with_resp(void *buf)
+{
+ struct octeon_soft_command *sc;
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct lio *lio;
+ struct octnic_gather *g;
+ int i, frags;
+
+ sc = (struct octeon_soft_command *)buf;
+ skb = (struct sk_buff *)sc->callback_arg;
+ finfo = (struct octnet_buf_free_info *)&skb->cb;
+
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
+ i = 1;
+ while (frags--) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ pci_unmap_page((lio->oct_dev)->pci_dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ frag->size, DMA_TO_DEVICE);
+ i++;
+ }
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ finfo->dptr, g->sg_size,
+ DMA_TO_DEVICE);
+
+ spin_lock(&lio->lock);
+ list_add_tail(&g->list, &lio->glist);
+ spin_unlock(&lio->lock);
+
+ /* Don't free the skb yet */
+
+ check_txq_state(lio, skb);
+}
+
+/**
+ * \brief Adjust ptp frequency
+ * @param ptp PTP clock info
+ * @param ppb how much to adjust by, in parts-per-billion
+ */
+static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ u64 comp, delta;
+ unsigned long flags;
+ bool neg_adj = false;
+
+ if (ppb < 0) {
+ neg_adj = true;
+ ppb = -ppb;
+ }
+
+ /* The hardware adds the clock compensation value to the
+ * PTP clock on every coprocessor clock cycle, so we
+ * compute the delta in terms of coprocessor clocks.
+ */
+ delta = (u64)ppb << 32;
+ do_div(delta, oct->coproc_clock_rate);
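+ /* The nominal compensation is (NSEC_PER_SEC << 32) / clock rate (see
+ * liquidio_ptp_init), so scaling it by ppb parts-per-billion reduces
+ * to (ppb << 32) / clock rate.
+ */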
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
+ if (neg_adj)
+ comp -= delta;
+ else
+ comp += delta;
+ lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * \brief Adjust ptp time
+ * @param ptp PTP clock info
+ * @param delta how much to adjust by, in nanosecs
+ */
+static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ lio->ptp_adjust += delta;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * \brief Get hardware clock time, including any adjustment
+ * @param ptp PTP clock info
+ * @param ts timespec
+ */
+static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ u64 ns;
+ u32 remainder;
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
+ ns += lio->ptp_adjust;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * \brief Set hardware clock time and reset the adjustment
+ * @param ptp PTP clock info
+ * @param ts timespec
+ */
+static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ ns = timespec_to_ns(ts);
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
+ lio->ptp_adjust = 0;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * \brief Enable/disable ancillary PTP features; not supported by this driver
+ * @param ptp PTP clock info
+ * @param rq feature request
+ * @param on whether to enable or disable the requested feature
+ */
+static int liquidio_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * \brief Open PTP clock source
+ * @param netdev network device
+ */
+static void oct_ptp_open(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ spin_lock_init(&lio->ptp_lock);
+
+ snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
+ lio->ptp_info.owner = THIS_MODULE;
+ lio->ptp_info.max_adj = 250000000;
+ lio->ptp_info.n_alarm = 0;
+ lio->ptp_info.n_ext_ts = 0;
+ lio->ptp_info.n_per_out = 0;
+ lio->ptp_info.pps = 0;
+ lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
+ lio->ptp_info.adjtime = liquidio_ptp_adjtime;
+ lio->ptp_info.gettime64 = liquidio_ptp_gettime;
+ lio->ptp_info.settime64 = liquidio_ptp_settime;
+ lio->ptp_info.enable = liquidio_ptp_enable;
+
+ lio->ptp_adjust = 0;
+
+ lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
+ &oct->pci_dev->dev);
+
+ if (IS_ERR(lio->ptp_clock))
+ lio->ptp_clock = NULL;
+}
+
+/**
+ * \brief Init PTP clock
+ * @param oct octeon device
+ */
+static void liquidio_ptp_init(struct octeon_device *oct)
+{
+ u64 clock_comp, cfg;
+
+ clock_comp = (u64)NSEC_PER_SEC << 32;
+ do_div(clock_comp, oct->coproc_clock_rate);
+ lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
+
+ /* Enable */
+ cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
+ lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
+}
+
+/**
+ * \brief Load firmware to device
+ * @param oct octeon device
+ *
+ * Maps device to firmware filename, requests firmware, and downloads it
+ */
+static int load_firmware(struct octeon_device *oct)
+{
+ int ret = 0;
+ const struct firmware *fw;
+ char fw_name[LIO_MAX_FW_FILENAME_LEN];
+ char *tmp_fw_type;
+
+ if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
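+ /* CRC over the 5-tuple: one byte of protocol, eight bytes covering
+ * the adjacent saddr and daddr fields, and four bytes covering the
+ * source and destination ports at the transport header.
+ */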
+ sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
+ dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
+ return ret;
+ }
+
+ if (fw_type[0] == '\0')
+ tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
+ else
+ tmp_fw_type = fw_type;
+
+ sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
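+ /* As in the IPv4 case: one byte of next header, 32 bytes covering
+ * the adjacent saddr and daddr, and four bytes of transport ports.
+ */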
+ octeon_get_conf(oct)->card_name, tmp_fw_type,
+ LIO_FW_NAME_SUFFIX);
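+ /* The name has the form <dir><base><card_name>_<fw_type><suffix>,
+ * e.g. something like "liquidio/lio_210sv_nic.bin" depending on the
+ * macro values.
+ */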
+
+ ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
+ fw_name);
+ return ret;
+ }
+
+ ret = octeon_download_firmware(oct, fw->data, fw->size);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+/**
+ * \brief Setup output queue
+ * @param oct octeon device
+ * @param q_no which queue
+ * @param num_descs how many descriptors
+ * @param desc_size size of each descriptor
+ * @param app_ctx application context
+ */
+static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
+ int desc_size, void *app_ctx)
+{
+ int ret_val = 0;
+
+ dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
+ /* droq creation and local register settings. */
+ ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
+ if (ret_val == -1)
+ return ret_val;
+
+ if (ret_val == 1) {
+ dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
+ return 0;
+ }
+ /* tasklet creation for the droq */
+
+ /* Enable the droq queues */
+ octeon_set_droq_pkt_op(oct, q_no, 1);
+
+ /* Send Credit for Octeon Output queues. Credits are always
+ * sent after the output queue is enabled.
+ */
+ writel(oct->droq[q_no]->max_count,
+ oct->droq[q_no]->pkts_credit_reg);
+
+ return ret_val;
+}
+
+/**
+ * \brief Callback for getting interface configuration
+ * @param oct octeon device
+ * @param status status of request
+ * @param buf pointer to the soft command carrying the response
+ */
+static void if_cfg_callback(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct liquidio_if_cfg_resp *resp;
+ struct liquidio_if_cfg_context *ctx;
+
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+ oct = lio_get_device(ctx->octeon_id);
+ if (resp->status)
+ dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
+ CVM_CAST64(resp->status));
+ ACCESS_ONCE(ctx->cond) = 1;
+
+ /* This barrier is required to be sure that the response has been
+ * written fully before waking up the handler
+ */
+ wmb();
+
+ wake_up_interruptible(&ctx->wc);
+}
+
+/**
+ * \brief Select queue based on hash
+ * @param dev Net device
+ * @param skb sk_buff structure
+ * @returns selected queue number
+ */
+static u16 select_q(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ int qindex;
+ struct lio *lio;
+
+ lio = GET_LIO(dev);
+ /* select queue on chosen queue_mapping or core */
+ qindex = skb_rx_queue_recorded(skb) ?
+ skb_get_rx_queue(skb) : smp_processor_id();
+ return (u16)(qindex & (lio->linfo.num_txpciq - 1));
+}
+
+/** Routine to push packets arriving on the Octeon interface up to the
+ * network layer.
+ * @param octeon_id - octeon device id.
+ * @param skbuff - skbuff struct to be passed to network layer.
+ * @param len - size of total data received.
+ * @param rh - Control header associated with the packet
+ * @param param - additional control data with the packet
+ */
+static void
+liquidio_push_packet(u32 octeon_id,
+ void *skbuff,
+ u32 len,
+ union octeon_rh *rh,
+ void *param)
+{
+ struct napi_struct *napi = param;
+ struct octeon_device *oct = lio_get_device(octeon_id);
+ struct sk_buff *skb = (struct sk_buff *)skbuff;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ u64 ns;
+ struct net_device *netdev =
+ (struct net_device *)oct->props[rh->r_dh.link].netdev;
+ struct octeon_droq *droq = container_of(param, struct octeon_droq,
+ napi);
+ if (netdev) {
+ int packet_was_received;
+ struct lio *lio = GET_LIO(netdev);
+
+ /* Do not proceed if the interface is not in RUNNING state. */
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+ recv_buffer_free(skb);
+ droq->stats.rx_dropped++;
+ return;
+ }
+
+ skb->dev = netdev;
+
+ if (rh->r_dh.has_hwtstamp) {
+ /* timestamp is included from the hardware at the
+ * beginning of the packet.
+ */
+ if (ifstate_check(lio,
+ LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
+ /* Nanoseconds are in the first 64-bits
+ * of the packet.
+ */
+ memcpy(&ns, (skb->data), sizeof(ns));
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(ns + lio->ptp_adjust);
+ }
+ skb_pull(skb, sizeof(ns));
+ }
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if ((netdev->features & NETIF_F_RXCSUM) &&
+ (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED))
+ /* checksum has already been verified */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
+
+ if (packet_was_received) {
+ droq->stats.rx_bytes_received += len;
+ droq->stats.rx_pkts_received++;
+ netdev->last_rx = jiffies;
+ } else {
+ droq->stats.rx_dropped++;
+ netif_info(lio, rx_err, lio->netdev,
+ "droq:%d error rx_dropped:%llu\n",
+ droq->q_no, droq->stats.rx_dropped);
+ }
+
+ } else {
+ recv_buffer_free(skb);
+ }
+}
+
+/**
+ * \brief wrapper for calling napi_schedule
+ * @param param parameters to pass to napi_schedule
+ *
+ * Used when scheduling on different CPUs
+ */
+static void napi_schedule_wrapper(void *param)
+{
+ struct napi_struct *napi = param;
+
+ napi_schedule(napi);
+}
+
+/**
+ * \brief callback when receive interrupt occurs and we are in NAPI mode
+ * @param arg pointer to octeon output queue
+ */
+static void liquidio_napi_drv_callback(void *arg)
+{
+ struct octeon_droq *droq = arg;
+ int this_cpu = smp_processor_id();
+
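+ /* If the interrupt landed on the droq's designated CPU, schedule
+ * NAPI directly; otherwise bounce the schedule to that CPU with an
+ * asynchronous smp call so RX processing stays on its assigned core.
+ */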
+ if (droq->cpu_id == this_cpu) {
+ napi_schedule(&droq->napi);
+ } else {
+ struct call_single_data *csd = &droq->csd;
+
+ csd->func = napi_schedule_wrapper;
+ csd->info = &droq->napi;
+ csd->flags = 0;
+
+ smp_call_function_single_async(droq->cpu_id, csd);
+ }
+}
+
+/**
+ * \brief Main NAPI poll function
+ * @param droq octeon output queue
+ * @param budget maximum number of items to process
+ */
+static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget)
+{
+ int work_done;
+ struct lio *lio = GET_LIO(droq->napi.dev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+ POLL_EVENT_PROCESS_PKTS,
+ budget);
+ if (work_done < 0) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Receive work_done < 0, rxq:%d\n", droq->q_no);
+ goto octnet_napi_finish;
+ }
+
+ if (work_done > budget)
+ dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n",
+ __func__, work_done, budget);
+
+ return work_done;
+
+octnet_napi_finish:
+ napi_complete(&droq->napi);
+ octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR,
+ 0);
+ return 0;
+}
+
+/**
+ * \brief Entry point for NAPI polling
+ * @param napi NAPI structure
+ * @param budget maximum number of items to process
+ */
+static int liquidio_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octeon_droq *droq;
+ int work_done;
+
+ droq = container_of(napi, struct octeon_droq, napi);
+
+ work_done = liquidio_napi_do_rx(droq, budget);
+
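+ /* Per NAPI convention, consuming less than the full budget means the
+ * queue is drained: complete NAPI and re-enable the interrupt.
+ */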
+ if (work_done < budget) {
+ napi_complete(napi);
+ octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
+ POLL_EVENT_ENABLE_INTR, 0);
+ return 0;
+ }
+
+ return work_done;
+}
+
+/**
+ * \brief Setup input and output queues
+ * @param octeon_dev octeon device
+ * @param net_device Net device
+ *
+ * Note: Queues are with respect to the octeon device. Thus
+ * an input queue is for egress packets, and output queues
+ * are for ingress packets.
+ */
+static inline int setup_io_queues(struct octeon_device *octeon_dev,
+ struct net_device *net_device)
+{
+ static int first_time = 1;
+ static struct octeon_droq_ops droq_ops;
+ static int cpu_id;
+ static int cpu_id_modulus;
+ struct octeon_droq *droq;
+ struct napi_struct *napi;
+ int q, q_no, retval = 0;
+ struct lio *lio;
+ int num_tx_descs;
+
+ lio = GET_LIO(net_device);
+ if (first_time) {
+ first_time = 0;
+ memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+
+ droq_ops.fptr = liquidio_push_packet;
+
+ droq_ops.poll_mode = 1;
+ droq_ops.napi_fn = liquidio_napi_drv_callback;
+ cpu_id = 0;
+ cpu_id_modulus = num_present_cpus();
+ }
+
+ /* set up DROQs. */
+ for (q = 0; q < lio->linfo.num_rxpciq; q++) {
+ q_no = lio->linfo.rxpciq[q];
+
+ retval = octeon_setup_droq(octeon_dev, q_no,
+ CFG_GET_NUM_RX_DESCS_NIC_IF
+ (octeon_get_conf(octeon_dev),
+ lio->ifidx),
+ CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
+ (octeon_get_conf(octeon_dev),
+ lio->ifidx), NULL);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ " %s : Runtime DROQ(RxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+
+ droq = octeon_dev->droq[q_no];
+ napi = &droq->napi;
+ netif_napi_add(net_device, napi, liquidio_napi_poll, 64);
+
+ /* designate a CPU for this droq */
+ droq->cpu_id = cpu_id;
+ cpu_id++;
+ if (cpu_id >= cpu_id_modulus)
+ cpu_id = 0;
+
+ octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
+ }
+
+ /* set up IQs. */
+ for (q = 0; q < lio->linfo.num_txpciq; q++) {
+ num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
+ (octeon_dev),
+ lio->ifidx);
+ retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q],
+ num_tx_descs,
+ netdev_get_tx_queue(net_device, q));
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ " %s : Runtime IQ(TxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * \brief Poll routine for checking transmit queue status
+ * @param work work_struct data structure
+ */
+static void octnet_poll_check_txq_status(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
+ return;
+
+ check_txq_status(lio);
+ queue_delayed_work(lio->txq_status_wq.wq,
+ &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+}
+
+/**
+ * \brief Sets up the txq poll check
+ * @param netdev network device
+ */
+static inline void setup_tx_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->txq_status_wq.wq = create_workqueue("txq-status");
+ if (!lio->txq_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
+ return;
+ }
+ INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
+ octnet_poll_check_txq_status);
+ lio->txq_status_wq.wk.ctxptr = lio;
+ queue_delayed_work(lio->txq_status_wq.wq,
+ &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+}
+
+/**
+ * \brief Net device open for LiquidIO
+ * @param netdev network device
+ */
+static int liquidio_open(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct napi_struct *napi, *n;
+
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_enable(napi);
+
+ oct_ptp_open(netdev);
+
+ ifstate_set(lio, LIO_IFSTATE_RUNNING);
+ setup_tx_poll_fn(netdev);
+ start_txq(netdev);
+
+ netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
+ try_module_get(THIS_MODULE);
+
+ /* tell Octeon to start forwarding packets to host */
+ send_rx_ctrl_cmd(lio, 1);
+
+ /* Ready for link status updates */
+ lio->intf_open = 1;
+
+ dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
+ netdev->name);
+
+ return 0;
+}
+
+/**
+ * \brief Net device stop for LiquidIO
+ * @param netdev network device
+ */
+static int liquidio_stop(struct net_device *netdev)
+{
+ struct napi_struct *napi, *n;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
+ /* Inform that netif carrier is down */
+ lio->intf_open = 0;
+ lio->linfo.link.s.status = 0;
+
+ netif_carrier_off(netdev);
+
+ /* tell Octeon to stop forwarding packets to host */
+ send_rx_ctrl_cmd(lio, 0);
+
+ cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+ flush_workqueue(lio->txq_status_wq.wq);
+ destroy_workqueue(lio->txq_status_wq.wq);
+
+ if (lio->ptp_clock) {
+ ptp_clock_unregister(lio->ptp_clock);
+ lio->ptp_clock = NULL;
+ }
+
+ ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+
+ /* This is a hack that allows DHCP to continue working. */
+ set_bit(__LINK_STATE_START, &lio->netdev->state);
+
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ txqs_stop(netdev);
+
+ dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
+{
+ struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
+ struct net_device *netdev = (struct net_device *)nctrl->netpndev;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ switch (nctrl->ncmd.s.cmd) {
+ case OCTNET_CMD_CHANGE_DEVFLAGS:
+ case OCTNET_CMD_SET_MULTI_LIST:
+ break;
+
+ case OCTNET_CMD_CHANGE_MACADDR:
+ /* If command is successful, change the MACADDR. */
+ netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n",
+ CVM_CAST64(nctrl->udd[0]));
+ dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n",
+ netdev->name, CVM_CAST64(nctrl->udd[0]));
+ memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN);
+ break;
+
+ case OCTNET_CMD_CHANGE_MTU:
+ /* If command is successful, change the MTU. */
+ netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
+ netdev->mtu, nctrl->ncmd.s.param2);
+ dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
+ netdev->name, netdev->mtu,
+ nctrl->ncmd.s.param2);
+ netdev->mtu = nctrl->ncmd.s.param2;
+ break;
+
+ case OCTNET_CMD_GPIO_ACCESS:
+ netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+ break;
+
+ case OCTNET_CMD_LRO_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+ break;
+
+ case OCTNET_CMD_LRO_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_SET_SETTINGS:
+ dev_info(&oct->pci_dev->dev, "%s settings changed\n",
+ netdev->name);
+
+ break;
+
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
+ nctrl->ncmd.s.cmd);
+ }
+}
+
+/**
+ * \brief Converts a mask based on net device flags
+ * @param netdev network device
+ *
+ * This routine generates a octnet_ifflags mask from the net device flags
+ * received from the OS.
+ */
+static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
+{
+ enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
+
+ if (netdev->flags & IFF_PROMISC)
+ f |= OCTNET_IFFLAG_PROMISC;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+
+ if (netdev->flags & IFF_MULTICAST) {
+ f |= OCTNET_IFFLAG_MULTICAST;
+
+ /* Accept all multicast addresses if there are more than we
+ * can handle
+ */
+ if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+ }
+
+ if (netdev->flags & IFF_BROADCAST)
+ f |= OCTNET_IFFLAG_BROADCAST;
+
+ return f;
+}
+
+/**
+ * \brief Net device set_multicast_list
+ * @param netdev network device
+ */
+static void liquidio_set_mcast_list(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ struct netdev_hw_addr *ha;
+ u64 *mc;
+ int ret, i;
+ int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ /* Create a ctrl pkt command to be sent to core app. */
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = get_new_flags(netdev);
+ nctrl.ncmd.s.param3 = mc_count;
+ nctrl.ncmd.s.more = mc_count;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ /* copy all the addresses into the udd */
+ i = 0;
+ mc = &nctrl.udd[0];
+ netdev_for_each_mc_addr(ha, netdev) {
+ *mc = 0;
+ memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
+ /* no need to swap bytes */
+
+ if (++mc > &nctrl.udd[mc_count])
+ break;
+ }
+
+ /* This callback runs in atomic context, so we cannot sleep waiting
+ * for a response.
+ */
+ nctrl.wait_time = 0;
+
+ nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+}
+
+/**
+ * \brief Net device set_mac_address
+ * @param netdev network device
+ */
+static int liquidio_set_mac(struct net_device *netdev, void *p)
+{
+ int ret = 0;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct sockaddr *addr = (struct sockaddr *)p;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+
+ if ((!is_valid_ether_addr(addr->sa_data)) ||
+ (ifstate_check(lio, LIO_IFSTATE_RUNNING)))
+ return -EADDRNOTAVAIL;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = 0;
+ nctrl.ncmd.s.more = 1;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+ nctrl.wait_time = 100;
+
+ nctrl.udd[0] = 0;
+ /* The MAC Address is presented in network byte order. */
+ memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
+ return -ENOMEM;
+ }
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
+
+ return 0;
+}
+
+/**
+ * \brief Net device get_stats
+ * @param netdev network device
+ */
+static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+ struct octeon_device *oct;
+ u64 pkts = 0, drop = 0, bytes = 0;
+ struct oct_droq_stats *oq_stats;
+ struct oct_iq_stats *iq_stats;
+ int i, iq_no, oq_no;
+
+ oct = lio->oct_dev;
+
+ for (i = 0; i < lio->linfo.num_txpciq; i++) {
+ iq_no = lio->linfo.txpciq[i];
+ iq_stats = &oct->instr_queue[iq_no]->stats;
+ pkts += iq_stats->tx_done;
+ drop += iq_stats->tx_dropped;
+ bytes += iq_stats->tx_tot_bytes;
+ }
+
+ stats->tx_packets = pkts;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = drop;
+
+ pkts = 0;
+ drop = 0;
+ bytes = 0;
+
+ for (i = 0; i < lio->linfo.num_rxpciq; i++) {
+ oq_no = lio->linfo.rxpciq[i];
+ oq_stats = &oct->droq[oq_no]->stats;
+ pkts += oq_stats->rx_pkts_received;
+ drop += (oq_stats->rx_dropped +
+ oq_stats->dropped_nodispatch +
+ oq_stats->dropped_toomany +
+ oq_stats->dropped_nomem);
+ bytes += oq_stats->rx_bytes_received;
+ }
+
+ stats->rx_bytes = bytes;
+ stats->rx_packets = pkts;
+ stats->rx_dropped = drop;
+
+ return stats;
+}
+
+/**
+ * \brief Net device change_mtu
+ * @param netdev network device
+ */
+static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
+ int ret = 0;
+
+ /* Limit the MTU to make sure the ethernet packets are between 64 bytes
+ * and 65535 bytes
+ */
+ if ((max_frm_size < OCTNET_MIN_FRM_SIZE) ||
+ (max_frm_size > OCTNET_MAX_FRM_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
+ dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
+ (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE),
+ (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE));
+ return -EINVAL;
+ }
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = new_mtu;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ nparams.resp_order = OCTEON_RESP_ORDERED;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
+ return -1;
+ }
+
+ lio->mtu = new_mtu;
+
+ return 0;
+}
+
+/**
+ * \brief Handler for SIOCSHWTSTAMP ioctl
+ * @param netdev network device
+ * @param ifr interface request
+ * @param cmd command
+ */
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config conf;
+ struct lio *lio = GET_LIO(netdev);
+
+ if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
+ return -EFAULT;
+
+ if (conf.flags)
+ return -EINVAL;
+
+ switch (conf.tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (conf.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
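+ /* The hardware can timestamp all received packets, so any
+ * specific PTP filter request is upscaled to
+ * HWTSTAMP_FILTER_ALL, which the timestamping API permits.
+ */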
+ conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+ else
+ ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+ return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+}
+
+/**
+ * \brief ioctl handler
+ * @param netdev network device
+ * @param ifr interface request
+ * @param cmd command
+ */
+static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return hwtstamp_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * \brief handle a Tx timestamp response
+ * @param oct octeon device
+ * @param status response status
+ * @param buf pointer to the skb
+ */
+static void handle_timestamp(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct octeon_soft_command *sc;
+ struct oct_timestamp_resp *resp;
+ struct lio *lio;
+ struct sk_buff *skb = (struct sk_buff *)buf;
+
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ lio = finfo->lio;
+ sc = finfo->sc;
+ oct = lio->oct_dev;
+ resp = (struct oct_timestamp_resp *)sc->virtrptr;
+
+ if (status != OCTEON_REQUEST_DONE) {
+ dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ resp->timestamp = 0;
+ }
+
+ octeon_swap_8B_data(&resp->timestamp, 1);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
+ struct skb_shared_hwtstamps ts;
+ u64 ns = resp->timestamp;
+
+ netif_info(lio, tx_done, lio->netdev,
+ "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
+ skb, (unsigned long long)ns);
+ ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
+ skb_tstamp_tx(skb, &ts);
+ }
+
+ octeon_free_soft_command(oct, sc);
+ recv_buffer_free(skb);
+}
+
+/** \brief Send a data packet that will be timestamped
+ * @param oct octeon device
+ * @param ndata pointer to network data
+ * @param finfo pointer to private network data
+ */
+static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata,
+ struct octnet_buf_free_info *finfo,
+ int xmit_more)
+{
+ int retval;
+ struct octeon_soft_command *sc;
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_rdp *rdp;
+ struct lio *lio;
+ int ring_doorbell;
+
+ lio = finfo->lio;
+
+ sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
+ sizeof(struct oct_timestamp_resp));
+ finfo->sc = sc;
+
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
+ return IQ_SEND_FAILED;
+ }
+
+ if (ndata->reqtype == REQTYPE_NORESP_NET)
+ ndata->reqtype = REQTYPE_RESP_NET;
+ else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
+ ndata->reqtype = REQTYPE_RESP_NET_SG;
+
+ sc->callback = handle_timestamp;
+ sc->callback_arg = finfo->skb;
+ sc->iq_no = ndata->q_no;
+
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
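+ /* Ring the doorbell only when the stack has no more packets pending
+ * (xmit_more clear), batching doorbell writes across back-to-back
+ * transmits.
+ */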
+ ring_doorbell = !xmit_more;
+ retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
+ sc, ih->dlengsz, ndata->reqtype);
+
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
+ retval);
+ octeon_free_soft_command(oct, sc);
+ } else {
+ netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
+ }
+
+ return retval;
+}
+
+static inline int is_ipv4(struct sk_buff *skb)
+{
+ return (skb->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->version == 4);
+}
+
+static inline int is_vlan(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_8021Q);
+}
+
+static inline int is_ip_fragmented(struct sk_buff *skb)
+{
+ /* The Don't Fragment and Reserved flag fields are ignored.
+ * An IP datagram is fragmented if
+ * - the More Fragments bit is set (indicating this datagram is a
+ * fragment with more to follow; the current offset could be 0), or
+ * - the offset field is non-zero.
+ */
+ return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0;
+}
+
+static inline int is_ipv6(struct sk_buff *skb)
+{
+ return (skb->protocol == htons(ETH_P_IPV6)) &&
+ (ipv6_hdr(skb)->version == 6);
+}
+
+static inline int is_with_extn_hdr(struct sk_buff *skb)
+{
+ return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
+ (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP);
+}
+
+static inline int is_tcpudp(struct sk_buff *skb)
+{
+ return (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+ (ip_hdr(skb)->protocol == IPPROTO_UDP);
+}
+
+static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb)
+{
+ u32 tag;
+ struct iphdr *iphdr = ip_hdr(skb);
+
+ tag = crc32(0, &iphdr->protocol, 1);
+ tag = crc32(tag, (u8 *)&iphdr->saddr, 8);
+ tag = crc32(tag, skb_transport_header(skb), 4);
+ return tag;
+}
+
+static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb)
+{
+ u32 tag;
+ struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
+
+ tag = crc32(0, &ipv6hdr->nexthdr, 1);
+ tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32);
+ tag = crc32(tag, skb_transport_header(skb), 4);
+ return tag;
+}
+
+/** \brief Transmit network packets to the Octeon interface
+ * @param skbuff skbuff struct to be passed to network layer.
+ * @param netdev pointer to network device
+ * @returns whether the packet was transmitted to the device okay or not
+ * (NETDEV_TX_OK or NETDEV_TX_BUSY)
+ */
+static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct lio *lio;
+ struct octnet_buf_free_info *finfo;
+ union octnic_cmd_setup cmdsetup;
+ struct octnic_data_pkt ndata;
+ struct octeon_device *oct;
+ struct oct_iq_stats *stats;
+ int cpu = 0, status = 0;
+ int q_idx = 0, iq_no = 0;
+ int xmit_more;
+ u32 tag = 0;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ if (netif_is_multiqueue(netdev)) {
+ cpu = skb->queue_mapping;
+ q_idx = (cpu & (lio->linfo.num_txpciq - 1));
+ iq_no = lio->linfo.txpciq[q_idx];
+ } else {
+ iq_no = lio->txq;
+ }
+
+ stats = &oct->instr_queue[iq_no]->stats;
+
+ /* Check for all conditions in which the current packet cannot be
+ * transmitted.
+ */
+ if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
+ (!lio->linfo.link.s.status) ||
+ (skb->len <= 0)) {
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit failed link_status : %d\n",
+ lio->linfo.link.s.status);
+ goto lio_xmit_failed;
+ }
+
+ /* Use space in skb->cb to store info used to unmap and
+ * free the buffers.
+ */
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ finfo->lio = lio;
+ finfo->skb = skb;
+ finfo->sc = NULL;
+
+ /* Prepare the attributes for the data to be passed to OSI. */
+ memset(&ndata, 0, sizeof(struct octnic_data_pkt));
+
+ ndata.buf = (void *)finfo;
+
+ ndata.q_no = iq_no;
+
+ if (netif_is_multiqueue(netdev)) {
+ if (octnet_iq_is_full(oct, ndata.q_no)) {
+ /* defer sending if queue is full */
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ stats->tx_iq_busy++;
+ return NETDEV_TX_BUSY;
+ }
+ } else {
+ if (octnet_iq_is_full(oct, lio->txq)) {
+ /* defer sending if queue is full */
+ stats->tx_iq_busy++;
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
+ * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no );
+ */
+
+ ndata.datasize = skb->len;
+
+ cmdsetup.u64 = 0;
+ cmdsetup.s.ifidx = lio->linfo.ifidx;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) {
+ tag = get_ipv4_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
+
+ if (ip_hdr(skb)->ihl > 5)
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV4OPTS;
+
+ } else if (is_ipv6(skb)) {
+ tag = get_ipv6_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
+
+ if (is_with_extn_hdr(skb))
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV6EXTHDR;
+
+ } else if (is_vlan(skb)) {
+ if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
+ == htons(ETH_P_IP) &&
+ !is_ip_fragmented(skb) && is_tcpudp(skb)) {
+ tag = get_ipv4_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset =
+ sizeof(struct vlan_ethhdr) + 1;
+
+ if (ip_hdr(skb)->ihl > 5)
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV4OPTS;
+
+ } else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
+ == htons(ETH_P_IPV6)) {
+ tag = get_ipv6_5tuple_tag(skb);
+
+ cmdsetup.s.cksum_offset =
+ sizeof(struct vlan_ethhdr) + 1;
+
+ if (is_with_extn_hdr(skb))
+ cmdsetup.s.ipv4opts_ipv6exthdr =
+ OCT_PKT_PARAM_IPV6EXTHDR;
+ }
+ }
+ }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
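+ /* Mark a hardware timestamp as in progress for the stack and
+ * flag the command so it is routed through
+ * send_nic_timestamp_pkt() below.
+ */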
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ cmdsetup.s.timestamp = 1;
+ }
+
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ cmdsetup.s.u.datasize = skb->len;
+ octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+ /* Map the linear skb data for transfer to the device */
+ ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ finfo->dptr = ndata.cmd.dptr;
+
+ ndata.reqtype = REQTYPE_NORESP_NET;
+
+ } else {
+ int i, frags;
+ struct skb_frag_struct *frag;
+ struct octnic_gather *g;
+
+ spin_lock(&lio->lock);
+ g = (struct octnic_gather *)list_delete_head(&lio->glist);
+ spin_unlock(&lio->lock);
+
+ if (!g) {
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit scatter gather: glist null!\n");
+ goto lio_xmit_failed;
+ }
+
+ cmdsetup.s.gather = 1;
+ cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
+ octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+
+ memset(g->sg, 0, g->sg_size);
+
+ g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+ add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
+
+ frags = skb_shinfo(skb)->nr_frags;
+ i = 1;
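+ /* Each octeon_sg_entry carries four pointers, so fragment i
+ * lands in entry (i >> 2), slot (i & 3); slot 0 of entry 0
+ * already holds the linear part of the skb.
+ */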
+ while (frags--) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+
+ g->sg[(i >> 2)].ptr[(i & 3)] =
+ dma_map_page(&oct->pci_dev->dev,
+ frag->page.p,
+ frag->page_offset,
+ frag->size,
+ DMA_TO_DEVICE);
+
+ add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ i++;
+ }
+
+ ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
+ g->sg, g->sg_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+ __func__);
+ dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
+ skb->len - skb->data_len,
+ DMA_TO_DEVICE);
+ return NETDEV_TX_BUSY;
+ }
+
+ finfo->dptr = ndata.cmd.dptr;
+ finfo->g = g;
+
+ ndata.reqtype = REQTYPE_NORESP_NET_SG;
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ struct octeon_instr_irh *irh =
+ (struct octeon_instr_irh *)&ndata.cmd.irh;
+ union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0];
+
+ irh->len = 1; /* to indicate that ossp[0] contains tx_info */
+ tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
+ tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+ }
+
+ xmit_more = skb->xmit_more;
+
+ if (unlikely(cmdsetup.s.timestamp))
+ status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
+ else
+ status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
+ if (status == IQ_SEND_FAILED)
+ goto lio_xmit_failed;
+
+ netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
+
+ if (status == IQ_SEND_STOP)
+ stop_q(lio->netdev, q_idx);
+
+ netdev->trans_start = jiffies;
+
+ stats->tx_done++;
+ stats->tx_tot_bytes += skb->len;
+
+ return NETDEV_TX_OK;
+
+lio_xmit_failed:
+ stats->tx_dropped++;
+ netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
+ iq_no, stats->tx_dropped);
+ dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+ ndata.datasize, DMA_TO_DEVICE);
+ recv_buffer_free(skb);
+ return NETDEV_TX_OK;
+}
+
+/** \brief Network device Tx timeout
+ * @param netdev pointer to network device
+ */
+static void liquidio_tx_timeout(struct net_device *netdev)
+{
+ struct lio *lio;
+
+ lio = GET_LIO(netdev);
+
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
+ netdev->stats.tx_dropped);
+ netdev->trans_start = jiffies;
+ txqs_wake(netdev);
+}
+
+int liquidio_set_feature(struct net_device *netdev, int cmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct octnic_ctrl_params nparams;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = cmd;
+ nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+ nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ nparams.resp_order = OCTEON_RESP_NORESPONSE;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+ return ret;
+}
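+/* Note: param2 always carries the LRO flags here; for commands other than
+ * LRO enable/disable (e.g. OCTNET_CMD_VERBOSE_ENABLE) the field is
+ * presumably ignored by the firmware.
+ */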
+
+/** \brief Net device fix features
+ * @param netdev pointer to network device
+ * @param request features requested
+ * @returns updated features list
+ */
+static netdev_features_t liquidio_fix_features(struct net_device *netdev,
+ netdev_features_t request)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if ((request & NETIF_F_RXCSUM) &&
+ !(lio->dev_capability & NETIF_F_RXCSUM))
+ request &= ~NETIF_F_RXCSUM;
+
+ if ((request & NETIF_F_HW_CSUM) &&
+ !(lio->dev_capability & NETIF_F_HW_CSUM))
+ request &= ~NETIF_F_HW_CSUM;
+
+ if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
+ request &= ~NETIF_F_TSO;
+
+ if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
+ request &= ~NETIF_F_TSO6;
+
+ if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+ /*Disable LRO if RXCSUM is off */
+ if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+ return request;
+}
+
+/** \brief Net device set features
+ * @param netdev pointer to network device
+ * @param features features to enable/disable
+ */
+static int liquidio_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if (!((netdev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+ else if (!(features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE);
+
+ return 0;
+}
+
+static struct net_device_ops lionetdevops = {
+ .ndo_open = liquidio_open,
+ .ndo_stop = liquidio_stop,
+ .ndo_start_xmit = liquidio_xmit,
+ .ndo_get_stats = liquidio_get_stats,
+ .ndo_set_mac_address = liquidio_set_mac,
+ .ndo_set_rx_mode = liquidio_set_mcast_list,
+ .ndo_tx_timeout = liquidio_tx_timeout,
+ .ndo_change_mtu = liquidio_change_mtu,
+ .ndo_do_ioctl = liquidio_ioctl,
+ .ndo_fix_features = liquidio_fix_features,
+ .ndo_set_features = liquidio_set_features,
+};
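+/* lionetdevops is left non-const because setup_nic_devices() patches in
+ * .ndo_select_queue at probe time when an interface gets more than one
+ * transmit queue.
+ */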
+
+/** \brief Entry point for the liquidio module
+ */
+static int __init liquidio_init(void)
+{
+ int i;
+ struct handshake *hs;
+
+ init_completion(&first_stage);
+
+ octeon_init_device_list(conf_type);
+
+ if (liquidio_init_pci())
+ return -EINVAL;
+
+ wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+ if (hs->pci_dev) {
+ wait_for_completion(&hs->init);
+ if (!hs->init_ok) {
+ /* init handshake failed */
+ dev_err(&hs->pci_dev->dev,
+ "Failed to init device\n");
+ liquidio_deinit_pci();
+ return -EIO;
+ }
+ }
+ }
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+ if (hs->pci_dev) {
+ wait_for_completion_timeout(&hs->started,
+ msecs_to_jiffies(30000));
+ if (!hs->started_ok) {
+ /* starter handshake failed */
+ dev_err(&hs->pci_dev->dev,
+ "Firmware failed to start\n");
+ liquidio_deinit_pci();
+ return -EIO;
+ }
+ }
+ }
+
+ return 0;
+}
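+/* Module load is a two-stage handshake per device: octeon_device_init()
+ * completes 'init' once the firmware is loaded (init_ok), and nic_starter()
+ * completes 'started' once the core application reports in (started_ok).
+ * The two loops above simply rendezvous with those completions.
+ */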
+
+static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
+{
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ int ifidx = 0;
+ union oct_link_status *ls;
+ int i;
+
+ if ((recv_pkt->buffer_size[0] != sizeof(*ls)) ||
+ (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) {
+ dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
+ recv_pkt->buffer_size[0],
+ recv_pkt->rh.r_nic_info.ifidx);
+ goto nic_info_err;
+ }
+
+ ifidx = recv_pkt->rh.r_nic_info.ifidx;
+ ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+
+ octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
+
+ update_link_status(oct->props[ifidx].netdev, ls);
+
+nic_info_err:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+/**
+ * \brief Setup network interfaces
+ * @param octeon_dev octeon device
+ *
+ * Called during init time for each device. It assumes the NIC
+ * is already up and running. The link information for each
+ * interface is passed in link_info.
+ */
+static int setup_nic_devices(struct octeon_device *octeon_dev)
+{
+ struct lio *lio = NULL;
+ struct net_device *netdev;
+ u8 mac[6], i, j;
+ struct octeon_soft_command *sc;
+ struct liquidio_if_cfg_context *ctx;
+ struct liquidio_if_cfg_resp *resp;
+ struct octdev_props *props;
+ int retval, num_iqueues, num_oqueues, q_no;
+ u64 q_mask;
+ int num_cpus = num_online_cpus();
+ union oct_nic_if_cfg if_cfg;
+ unsigned int base_queue;
+ unsigned int gmx_port_id;
+ u32 resp_size, ctx_size;
+
+ /* This is to handle link status changes */
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+ OPCODE_NIC_INFO,
+ lio_nic_info, octeon_dev);
+
+ /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
+ * They are handled directly.
+ */
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
+ free_netbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
+ free_netsgbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
+ free_netsgbuf_with_resp);
+
+ for (i = 0; i < octeon_dev->ifcount; i++) {
+ resp_size = sizeof(struct liquidio_if_cfg_resp);
+ ctx_size = sizeof(struct liquidio_if_cfg_context);
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(octeon_dev, 0,
+ resp_size, ctx_size);
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+ num_iqueues =
+ CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
+ num_oqueues =
+ CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
+ base_queue =
+ CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
+ gmx_port_id =
+ CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
+ if (num_iqueues > num_cpus)
+ num_iqueues = num_cpus;
+ if (num_oqueues > num_cpus)
+ num_oqueues = num_cpus;
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "requesting config for interface %d, iqs %d, oqs %d\n",
+ i, num_iqueues, num_oqueues);
+ ACCESS_ONCE(ctx->cond) = 0;
+ ctx->octeon_id = lio_get_device_id(octeon_dev);
+ init_waitqueue_head(&ctx->wc);
+
+ if_cfg.u64 = 0;
+ if_cfg.s.num_iqueues = num_iqueues;
+ if_cfg.s.num_oqueues = num_oqueues;
+ if_cfg.s.base_queue = base_queue;
+ if_cfg.s.gmx_port_id = gmx_port_id;
+ octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_IF_CFG, i,
+ if_cfg.u64, 0);
+
+ sc->callback = if_cfg_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 1000;
+
+ retval = octeon_send_soft_command(octeon_dev, sc);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "iq/oq config failed status: %x\n",
+ retval);
+ /* Soft instr is freed by driver in case of failure. */
+ goto setup_nic_dev_fail;
+ }
+
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed-out.
+ */
+ sleep_cond(&ctx->wc, &ctx->cond);
+ retval = resp->status;
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ octeon_swap_8B_data((u64 *)(&resp->cfg_info),
+ (sizeof(struct liquidio_if_cfg_info)) >> 3);
+
+ num_iqueues = hweight64(resp->cfg_info.iqmask);
+ num_oqueues = hweight64(resp->cfg_info.oqmask);
+
+ if (!(num_iqueues) || !(num_oqueues)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
+ resp->cfg_info.iqmask,
+ resp->cfg_info.oqmask);
+ goto setup_nic_dev_fail;
+ }
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
+ i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
+ num_iqueues, num_oqueues);
+ netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
+
+ if (!netdev) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ props = &octeon_dev->props[i];
+ props->netdev = netdev;
+
+ if (num_iqueues > 1)
+ lionetdevops.ndo_select_queue = select_q;
+
+ /* Associate the routines that will handle different
+ * netdev tasks.
+ */
+ netdev->netdev_ops = &lionetdevops;
+
+ lio = GET_LIO(netdev);
+
+ memset(lio, 0, sizeof(struct lio));
+
+ lio->linfo.ifidx = resp->cfg_info.ifidx;
+ lio->ifidx = resp->cfg_info.ifidx;
+
+ lio->linfo.num_rxpciq = num_oqueues;
+ lio->linfo.num_txpciq = num_iqueues;
+ q_mask = resp->cfg_info.oqmask;
+ /* q_mask bits are 0-based; the mask was verified nonzero above */
+ for (j = 0; j < num_oqueues; j++) {
+ q_no = __ffs64(q_mask);
+ q_mask &= ~(1ULL << q_no);
+ lio->linfo.rxpciq[j] = q_no;
+ }
+ q_mask = resp->cfg_info.iqmask;
+ for (j = 0; j < num_iqueues; j++) {
+ q_no = __ffs64(q_mask);
+ q_mask &= ~(1ULL << q_no);
+ lio->linfo.txpciq[j] = q_no;
+ }
+ lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+
+ lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ lio->dev_capability = NETIF_F_HIGHDMA
+ | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+ netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+
+ netdev->features = lio->dev_capability;
+ netdev->vlan_features = lio->dev_capability;
+
+ netdev->hw_features = lio->dev_capability;
+
+ /* Point to the properties of the Octeon device to which this
+ * interface belongs.
+ */
+ lio->oct_dev = octeon_dev;
+ lio->octprops = props;
+ lio->netdev = netdev;
+ spin_lock_init(&lio->lock);
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "if%d gmx: %d hw_addr: 0x%llx\n", i,
+ lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
+
+ /* 64-bit swap required on LE machines */
+ octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
+ for (j = 0; j < 6; j++)
+ mac[j] = ((u8 *)&lio->linfo.hw_addr)[2 + j];
+
+ /* Copy MAC Address to OS network device structure */
+
+ ether_addr_copy(netdev->dev_addr, mac);
+
+ if (setup_io_queues(octeon_dev, netdev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
+
+ /* By default, all interfaces on a single Octeon use the same
+ * tx and rx queues.
+ */
+ lio->txq = lio->linfo.txpciq[0];
+ lio->rxq = lio->linfo.rxpciq[0];
+
+ lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
+ lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
+
+ if (setup_glist(lio)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Gather list allocation failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ /* Register ethtool support */
+ liquidio_set_ethtool_ops(netdev);
+
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+
+ if ((debug != -1) && (debug & NETIF_MSG_HW))
+ liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE);
+
+ /* Register the network device with the OS */
+ if (register_netdev(netdev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
+ goto setup_nic_dev_fail;
+ }
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
+ i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ netif_carrier_off(netdev);
+
+ if (lio->linfo.link.s.status) {
+ netif_carrier_on(netdev);
+ start_txq(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ }
+
+ ifstate_set(lio, LIO_IFSTATE_REGISTERED);
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup successful\n", i);
+
+ octeon_free_soft_command(octeon_dev, sc);
+ }
+
+ return 0;
+
+setup_nic_dev_fail:
+
+ octeon_free_soft_command(octeon_dev, sc);
+
+ while (i--) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup failed\n", i);
+ liquidio_destroy_nic_device(octeon_dev, i);
+ }
+ return -ENODEV;
+}
+
+/**
+ * \brief initialize the NIC
+ * @param oct octeon device
+ *
+ * This initialization routine is called once the Octeon device application is
+ * up and running
+ */
+static int liquidio_init_nic_module(struct octeon_device *oct)
+{
+ struct oct_intrmod_cfg *intrmod_cfg;
+ int retval = 0;
+ int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
+
+ dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
+
+ /* Only the default IQ and OQ were initialized so far; bring up
+ * the rest by running a port_config command for each port.
+ */
+ oct->ifcount = num_nic_ports;
+
+ memset(oct->props, 0,
+ sizeof(struct octdev_props) * num_nic_ports);
+
+ retval = setup_nic_devices(oct);
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
+ goto octnet_init_failure;
+ }
+
+ liquidio_ptp_init(oct);
+
+ /* Initialize interrupt moderation params */
+ intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
+ intrmod_cfg->intrmod_enable = 1;
+ intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+ intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
+ intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
+ intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER;
+ intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER;
+ intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER;
+ intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER;
+
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
+
+ return retval;
+
+octnet_init_failure:
+
+ oct->ifcount = 0;
+
+ return retval;
+}
+
+/**
+ * \brief starter callback that invokes the remaining initialization work after
+ * the NIC is up and running.
+ * @param work pointer to the work_struct embedded in struct cavium_wk
+ */
+static void nic_starter(struct work_struct *work)
+{
+ struct octeon_device *oct;
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+
+ oct = (struct octeon_device *)wk->ctxptr;
+
+ if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
+ return;
+
+ /* If the status of the device is CORE_OK, the core
+ * application has reported its application type. Call
+ * any registered handlers now and move to the RUNNING
+ * state.
+ */
+ if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
+ schedule_delayed_work(&oct->nic_poll_work.work,
+ LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+ return;
+ }
+
+ atomic_set(&oct->status, OCT_DEV_RUNNING);
+
+ if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
+ dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
+
+ if (liquidio_init_nic_module(oct))
+ dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
+ else
+ handshake[oct->octeon_id].started_ok = 1;
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "Unexpected application running on NIC (%d). Check firmware.\n",
+ oct->app_mode);
+ }
+
+ complete(&handshake[oct->octeon_id].started);
+}
+
+/**
+ * \brief Device initialization for each Octeon device that is probed
+ * @param octeon_dev octeon device
+ */
+static int octeon_device_init(struct octeon_device *octeon_dev)
+{
+ int j, ret;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)octeon_dev->priv;
+ atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
+
+ /* Enable access to the octeon device and make its DMA capability
+ * known to the OS.
+ */
+ if (octeon_pci_os_setup(octeon_dev))
+ return 1;
+
+ /* Identify the Octeon type and map the BAR address space. */
+ if (octeon_chip_specific_setup(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
+ return 1;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
+
+ octeon_dev->app_mode = CVM_DRV_INVALID_APP;
+
+ /* Do a soft reset of the Octeon device. */
+ if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ return 1;
+
+ /* Initialize the dispatch mechanism used to push packets arriving on
+ * Octeon Output queues.
+ */
+ if (octeon_init_dispatch_list(octeon_dev))
+ return 1;
+
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+ OPCODE_NIC_CORE_DRV_ACTIVE,
+ octeon_core_drv_init,
+ octeon_dev);
+
+ INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
+ octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
+ schedule_delayed_work(&octeon_dev->nic_poll_work.work,
+ LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
+
+ octeon_set_io_queues_off(octeon_dev);
+
+ /* Setup the data structures that manage this Octeon's Input queues. */
+ if (octeon_setup_instr_queues(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "instruction queue initialization failed\n");
+ /* On error, release any previously allocated queues */
+ for (j = 0; j < octeon_dev->num_iqs; j++)
+ octeon_delete_instr_queue(octeon_dev, j);
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
+
+ /* Initialize soft command buffer pool
+ */
+ if (octeon_setup_sc_buffer_pool(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
+ /* Initialize lists to manage the requests of different types that
+ * arrive from user & kernel applications for this octeon device.
+ */
+ if (octeon_setup_response_list(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
+
+ if (octeon_setup_output_queues(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
+ /* Release any previously allocated queues */
+ for (j = 0; j < octeon_dev->num_oqs; j++)
+ octeon_delete_droq(octeon_dev, j);
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
+
+ /* The input and output queue registers were setup earlier (the queues
+ * were not enabled). Any additional registers that need to be
+ * programmed should be done now.
+ */
+ ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Failed to configure device registers\n");
+ return ret;
+ }
+
+ /* Initialize the tasklet that handles output queue packet processing.*/
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
+ tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
+ (unsigned long)octeon_dev);
+
+ /* Setup the interrupt handler and record the INT SUM register address
+ */
+ octeon_setup_interrupt(octeon_dev);
+
+ /* Enable Octeon device interrupts */
+ octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
+
+ /* Enable the input and output queues for this Octeon device */
+ octeon_dev->fn_list.enable_io_queues(octeon_dev);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
+
+ dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
+
+ if (ddr_timeout == 0) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+ }
+
+ schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
+
+ /* Wait for the octeon to initialize DDR after the soft-reset. */
+ ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
+ ret);
+ return 1;
+ }
+
+ if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) {
+ dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
+ return 1;
+ }
+
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
+ ret = octeon_init_consoles(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
+ return 1;
+ }
+ ret = octeon_add_console(octeon_dev, 0);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
+ return 1;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
+
+ dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
+ ret = load_firmware(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
+ return 1;
+ }
+
+ handshake[octeon_dev->octeon_id].init_ok = 1;
+ complete(&handshake[octeon_dev->octeon_id].init);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
+
+ /* Send Credit for Octeon Output queues. Credits are always sent after
+ * the output queue is enabled.
+ */
+ for (j = 0; j < octeon_dev->num_oqs; j++)
+ writel(octeon_dev->droq[j]->max_count,
+ octeon_dev->droq[j]->pkts_credit_reg);
+
+ /* Packets can start arriving on the output queues from this point. */
+
+ return 0;
+}
+
+/**
+ * \brief Exits the module
+ */
+static void __exit liquidio_exit(void)
+{
+ liquidio_deinit_pci();
+
+ pr_info("LiquidIO network module is now unloaded\n");
+}
+
+module_init(liquidio_init);
+module_exit(liquidio_exit);
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
new file mode 100644
index 000000000000..0ac347ccc8ba
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -0,0 +1,673 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file liquidio_common.h
+ * \brief Common: Structures and macros used in PCI-NIC package by core and
+ * host driver.
+ */
+
+#ifndef __LIQUIDIO_COMMON_H__
+#define __LIQUIDIO_COMMON_H__
+
+#include "octeon_config.h"
+
+#define LIQUIDIO_VERSION "1.1.9"
+#define LIQUIDIO_MAJOR_VERSION 1
+#define LIQUIDIO_MINOR_VERSION 1
+#define LIQUIDIO_MICRO_VERSION 9
+
+#define CONTROL_IQ 0
+/** Tag types used by Octeon cores in its work. */
+enum octeon_tag_type {
+ ORDERED_TAG = 0,
+ ATOMIC_TAG = 1,
+ NULL_TAG = 2,
+ NULL_NULL_TAG = 3
+};
+
+/* pre-defined host->NIC tag values */
+#define LIO_CONTROL (0x11111110)
+#define LIO_DATA(i) (0x11111111 + (i))
+
+/* Opcodes used by host driver/apps to perform operations on the core.
+ * These are used to identify the major subsystem that the operation
+ * is for.
+ */
+#define OPCODE_CORE 0 /* used for generic core operations */
+#define OPCODE_NIC 1 /* used for NIC operations */
+#define OPCODE_LAST OPCODE_NIC
+
+/* Subcodes are used by host driver/apps to identify the sub-operation
+ * for the core. They only need to be unique within a given subsystem.
+ */
+#define OPCODE_SUBCODE(op, sub) ((((op) & 0x0f) << 8) | ((sub) & 0x7f))
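+
+/* For example, OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_INFO) works out to
+ * ((1 & 0x0f) << 8) | (4 & 0x7f) == 0x104.
+ */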
+
+/** OPCODE_CORE subcodes. For future use. */
+
+/** OPCODE_NIC subcodes */
+
+/* This subcode is sent by core PCI driver to indicate cores are ready. */
+#define OPCODE_NIC_CORE_DRV_ACTIVE 0x01
+#define OPCODE_NIC_NW_DATA 0x02 /* network packet data */
+#define OPCODE_NIC_CMD 0x03
+#define OPCODE_NIC_INFO 0x04
+#define OPCODE_NIC_PORT_STATS 0x05
+#define OPCODE_NIC_MDIO45 0x06
+#define OPCODE_NIC_TIMESTAMP 0x07
+#define OPCODE_NIC_INTRMOD_CFG 0x08
+#define OPCODE_NIC_IF_CFG 0x09
+
+#define CORE_DRV_TEST_SCATTER_OP 0xFFF5
+
+#define OPCODE_SLOW_PATH(rh) \
+ (OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode) != \
+ OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA))
+
+/* Application codes advertised by the core driver initialization packet. */
+#define CVM_DRV_APP_START 0x0
+#define CVM_DRV_NO_APP 0
+#define CVM_DRV_APP_COUNT 0x2
+#define CVM_DRV_BASE_APP (CVM_DRV_APP_START + 0x0)
+#define CVM_DRV_NIC_APP (CVM_DRV_APP_START + 0x1)
+#define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2)
+#define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1)
+
+/* Macro to increment index.
+ * Index is incremented by count; if the sum reaches or exceeds
+ * max, index is wrapped around to the start.
+ */
+#define INCR_INDEX(index, count, max) \
+do { \
+ if (((index) + (count)) >= (max)) \
+ index = ((index) + (count)) - (max); \
+ else \
+ index += (count); \
+} while (0)
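+
+/* e.g. with a 32-entry ring: INCR_INDEX(index = 30, count = 5, max = 32)
+ * wraps the index around to (30 + 5) - 32 == 3.
+ */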
+
+#define INCR_INDEX_BY1(index, max) \
+do { \
+ if ((++(index)) == (max)) \
+ index = 0; \
+} while (0)
+
+#define DECR_INDEX(index, count, max) \
+do { \
+ if ((count) > (index)) \
+ index = ((max) - ((count - index))); \
+ else \
+ index -= count; \
+} while (0)
+
+#define OCT_BOARD_NAME 32
+#define OCT_SERIAL_LEN 64
+
+/* Structure used by core driver to send indication that the Octeon
+ * application is ready.
+ */
+struct octeon_core_setup {
+ u64 corefreq;
+
+ char boardname[OCT_BOARD_NAME];
+
+ char board_serial_number[OCT_SERIAL_LEN];
+
+ u64 board_rev_major;
+
+ u64 board_rev_minor;
+
+};
+
+/*--------------------------- SCATTER GATHER ENTRY -----------------------*/
+
+/* The Scatter-Gather List Entry. The scatter or gather component used with
+ * a Octeon input instruction has this format.
+ */
+struct octeon_sg_entry {
+ /** The first 64 bit gives the size of data in each dptr.*/
+ union {
+ u16 size[4];
+ u64 size64;
+ } u;
+
+ /** The 4 dptr pointers for this entry. */
+ u64 ptr[4];
+
+};
+
+#define OCT_SG_ENTRY_SIZE (sizeof(struct octeon_sg_entry))
+
+/* \brief Add size to gather list
+ * @param sg_entry scatter/gather entry
+ * @param size size to add
+ * @param pos position to add it.
+ */
+static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
+ u16 size,
+ u32 pos)
+{
+#ifdef __BIG_ENDIAN_BITFIELD
+ sg_entry->u.size[pos] = size;
+#else
+ sg_entry->u.size[3 - pos] = size;
+#endif
+}
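+
+/* The size array is mirrored on little-endian hosts; presumably the device
+ * consumes size64 big-endian, so writing u.size[3 - pos] puts the length in
+ * the lane the hardware reads for slot 'pos'.
+ */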
+
+/*------------------------- End Scatter/Gather ---------------------------*/
+
+#define OCTNET_FRM_PTP_HEADER_SIZE 8
+#define OCTNET_FRM_HEADER_SIZE 30 /* PTP timestamp + VLAN + Ethernet */
+
+#define OCTNET_MIN_FRM_SIZE (64 + OCTNET_FRM_PTP_HEADER_SIZE)
+#define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE)
+
+#define OCTNET_DEFAULT_FRM_SIZE (1500 + OCTNET_FRM_HEADER_SIZE)
+
+/** NIC Commands are sent using this Octeon Input Queue */
+#define OCTNET_CMD_Q 0
+
+/* NIC Command types */
+#define OCTNET_CMD_CHANGE_MTU 0x1
+#define OCTNET_CMD_CHANGE_MACADDR 0x2
+#define OCTNET_CMD_CHANGE_DEVFLAGS 0x3
+#define OCTNET_CMD_RX_CTL 0x4
+
+#define OCTNET_CMD_SET_MULTI_LIST 0x5
+#define OCTNET_CMD_CLEAR_STATS 0x6
+
+/* command for setting the speed, duplex & autoneg */
+#define OCTNET_CMD_SET_SETTINGS 0x7
+#define OCTNET_CMD_SET_FLOW_CTL 0x8
+
+#define OCTNET_CMD_MDIO_READ_WRITE 0x9
+#define OCTNET_CMD_GPIO_ACCESS 0xA
+#define OCTNET_CMD_LRO_ENABLE 0xB
+#define OCTNET_CMD_LRO_DISABLE 0xC
+#define OCTNET_CMD_SET_RSS 0xD
+#define OCTNET_CMD_WRITE_SA 0xE
+#define OCTNET_CMD_DELETE_SA 0xF
+#define OCTNET_CMD_UPDATE_SA 0x12
+
+#define OCTNET_CMD_TNL_RX_CSUM_CTL 0x10
+#define OCTNET_CMD_TNL_TX_CSUM_CTL 0x11
+#define OCTNET_CMD_IPSECV2_AH_ESP_CTL 0x13
+#define OCTNET_CMD_VERBOSE_ENABLE 0x14
+#define OCTNET_CMD_VERBOSE_DISABLE 0x15
+
+/* RX(packets coming from wire) Checksum verification flags */
+/* TCP/UDP csum */
+#define CNNIC_L4SUM_VERIFIED 0x1
+#define CNNIC_IPSUM_VERIFIED 0x2
+#define CNNIC_TUN_CSUM_VERIFIED 0x4
+#define CNNIC_CSUM_VERIFIED (CNNIC_IPSUM_VERIFIED | CNNIC_L4SUM_VERIFIED)
+
+/*LROIPV4 and LROIPV6 Flags*/
+#define OCTNIC_LROIPV4 0x1
+#define OCTNIC_LROIPV6 0x2
+
+/* Interface flags communicated between host driver and core app. */
+enum octnet_ifflags {
+ OCTNET_IFFLAG_PROMISC = 0x01,
+ OCTNET_IFFLAG_ALLMULTI = 0x02,
+ OCTNET_IFFLAG_MULTICAST = 0x04,
+ OCTNET_IFFLAG_BROADCAST = 0x08,
+ OCTNET_IFFLAG_UNICAST = 0x10
+};
+
+/* wqe
+ * --------------- 0
+ * | wqe word0-3 |
+ * --------------- 32
+ * | PCI IH |
+ * --------------- 40
+ * | RPTR |
+ * --------------- 48
+ * | PCI IRH |
+ * --------------- 56
+ * | OCT_NET_CMD |
+ * --------------- 64
+ * | Addtl 8B Data |
+ * | |
+ * ---------------
+ */
+
+union octnet_cmd {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 cmd:5;
+
+ u64 more:6; /* How many udd words follow the command */
+
+ u64 param1:29;
+
+ u64 param2:16;
+
+ u64 param3:8;
+
+#else
+
+ u64 param3:8;
+
+ u64 param2:16;
+
+ u64 param1:29;
+
+ u64 more:6;
+
+ u64 cmd:5;
+
+#endif
+ } s;
+
+};
+
+#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
+
+/** Instruction Header */
+struct octeon_instr_ih {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Gather indicator 1=gather*/
+ u64 gather:1;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Packet Order / Work Unit selection (1 of 8)*/
+ u64 qos:3;
+
+ /** Core group selection (1 of 16) */
+ u64 grp:4;
+
+ /** Short Raw Packet Indicator 1=short raw pkt */
+ u64 rs:1;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Tag Value */
+ u64 tag:32;
+#else
+ /** Tag Value */
+ u64 tag:32;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Short Raw Packet Indicator 1=short raw pkt */
+ u64 rs:1;
+
+ /** Core group selection (1 of 16) */
+ u64 grp:4;
+
+ /** Packet Order / Work Unit selection (1 of 8)*/
+ u64 qos:3;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Gather indicator 1=gather*/
+ u64 gather:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+#endif
+};
+
+/** Input Request Header */
+struct octeon_instr_irh {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 opcode:4;
+ u64 rflag:1;
+ u64 subcode:7;
+ u64 len:3;
+ u64 rid:13;
+ u64 reserved:4;
+ u64 ossp:32; /* opcode/subcode specific parameters */
+#else
+ u64 ossp:32; /* opcode/subcode specific parameters */
+ u64 reserved:4;
+ u64 rid:13;
+ u64 len:3;
+ u64 subcode:7;
+ u64 rflag:1;
+ u64 opcode:4;
+#endif
+};
+
+/** Return Data Parameters */
+struct octeon_instr_rdp {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:49;
+ u64 pcie_port:3;
+ u64 rlen:12;
+#else
+ u64 rlen:12;
+ u64 pcie_port:3;
+ u64 reserved:49;
+#endif
+};
+
+/** Receive Header */
+union octeon_rh {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 u64;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13; /** request id in response to pkt sent by host */
+ u64 reserved:4;
+ u64 ossp:32; /** opcode/subcode specific parameters */
+ } r;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13; /** request id in response to pkt sent by host */
+ u64 extra:24;
+ u64 link:8;
+ u64 csum_verified:3; /** checksum verified. */
+ u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */
+ } r_dh;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13; /** request id in response to pkt sent by host */
+ u64 num_gmx_ports:8;
+ u64 max_nic_ports:8;
+ u64 app_cap_flags:4;
+ u64 app_mode:16;
+ } r_core_drv_init;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 rid:13;
+ u64 reserved:4;
+ u64 extra:25;
+ u64 ifidx:7;
+ } r_nic_info;
+#else
+ u64 u64;
+ struct {
+ u64 ossp:32; /** opcode/subcode specific parameters */
+ u64 reserved:4;
+ u64 rid:13; /** req id in response to pkt sent by host */
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r;
+ struct {
+ u64 has_hwtstamp:1; /** 1 = has hwtstamp */
+ u64 csum_verified:3; /** checksum verified. */
+ u64 link:8;
+ u64 extra:24;
+ u64 rid:13; /** req id in response to pkt sent by host */
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_dh;
+ struct {
+ u64 app_mode:16;
+ u64 app_cap_flags:4;
+ u64 max_nic_ports:8;
+ u64 num_gmx_ports:8;
+ u64 rid:13;
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_core_drv_init;
+ struct {
+ u64 ifidx:7;
+ u64 extra:25;
+ u64 reserved:4;
+ u64 rid:13;
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_nic_info;
+#endif
+};
+
+#define OCT_RH_SIZE (sizeof(union octeon_rh))
+
+#define OCT_PKT_PARAM_IPV4OPTS 1
+#define OCT_PKT_PARAM_IPV6EXTHDR 2
+
+union octnic_packet_params {
+ u32 u32;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 reserved:6;
+ u32 tnl_csum:1;
+ u32 ip_csum:1;
+ u32 ipv4opts_ipv6exthdr:2;
+ u32 ipsec_ops:4;
+ u32 tsflag:1;
+ u32 csoffset:9;
+ u32 ifidx:8;
+#else
+ u32 ifidx:8;
+ u32 csoffset:9;
+ u32 tsflag:1;
+ u32 ipsec_ops:4;
+ u32 ipv4opts_ipv6exthdr:2;
+ u32 ip_csum:1;
+ u32 tnl_csum:1;
+ u32 reserved:6;
+#endif
+ } s;
+};
+
+/** Status of a RGMII Link on Octeon as seen by core driver. */
+union oct_link_status {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 duplex:8;
+ u64 status:8;
+ u64 mtu:16;
+ u64 speed:16;
+ u64 autoneg:1;
+ u64 interface:4;
+ u64 pause:1;
+ u64 reserved:10;
+#else
+ u64 reserved:10;
+ u64 pause:1;
+ u64 interface:4;
+ u64 autoneg:1;
+ u64 speed:16;
+ u64 mtu:16;
+ u64 status:8;
+ u64 duplex:8;
+#endif
+ } s;
+};
+
+/** Information for a OCTEON ethernet interface shared between core & host. */
+struct oct_link_info {
+ union oct_link_status link;
+ u64 hw_addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ u16 gmxport;
+ u8 rsvd[3];
+ u8 num_txpciq;
+ u8 num_rxpciq;
+ u8 ifidx;
+#else
+ u8 ifidx;
+ u8 num_rxpciq;
+ u8 num_txpciq;
+ u8 rsvd[3];
+ u16 gmxport;
+#endif
+
+ u8 txpciq[MAX_IOQS_PER_NICIF];
+ u8 rxpciq[MAX_IOQS_PER_NICIF];
+};
+
+#define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info))
+
+struct liquidio_if_cfg_info {
+ u64 ifidx;
+ u64 iqmask; /** mask for IQs enabled for the port */
+ u64 oqmask; /** mask for OQs enabled for the port */
+ struct oct_link_info linfo; /** initial link information */
+};
+
+/** Stats for each NIC port in RX direction. */
+struct nic_rx_stats {
+ /* link-level stats */
+ u64 total_rcvd;
+ u64 bytes_rcvd;
+ u64 total_bcst;
+ u64 total_mcst;
+ u64 runts;
+ u64 ctl_rcvd;
+ u64 fifo_err; /* Accounts for over/under-run of buffers */
+ u64 dmac_drop;
+ u64 fcs_err;
+ u64 jabber_err;
+ u64 l2_err;
+ u64 frame_err;
+
+ /* firmware stats */
+ u64 fw_total_rcvd;
+ u64 fw_total_fwd;
+ u64 fw_err_pko;
+ u64 fw_err_link;
+ u64 fw_err_drop;
+ u64 fw_lro_pkts; /* Number of packets that are LROed */
+ u64 fw_lro_octs; /* Number of octets that are LROed */
+ u64 fw_total_lro; /* Number of LRO packets formed */
+ u64 fw_lro_aborts; /* Number of times LRO of a packet was aborted */
+ /* intrmod: packet forward rate */
+ u64 fwd_rate;
+};
+
+/** Stats for each NIC port in TX direction. */
+struct nic_tx_stats {
+ /* link-level stats */
+ u64 total_pkts_sent;
+ u64 total_bytes_sent;
+ u64 mcast_pkts_sent;
+ u64 bcast_pkts_sent;
+ u64 ctl_sent;
+ u64 one_collision_sent; /* Packets sent after one collision*/
+ u64 multi_collision_sent; /* Packets sent after multiple collision*/
+ u64 max_collision_fail; /* Packets not sent due to max collisions */
+ u64 max_deferral_fail; /* Packets not sent due to max deferrals */
+ u64 fifo_err; /* Accounts for over/under-run of buffers */
+ u64 runts;
+ u64 total_collisions; /* Total number of collisions detected */
+
+ /* firmware stats */
+ u64 fw_total_sent;
+ u64 fw_total_fwd;
+ u64 fw_err_pko;
+ u64 fw_err_link;
+ u64 fw_err_drop;
+};
+
+struct oct_link_stats {
+ struct nic_rx_stats fromwire;
+ struct nic_tx_stats fromhost;
+
+};
+
+#define LIO68XX_LED_CTRL_ADDR 0x3501
+#define LIO68XX_LED_CTRL_CFGON 0x1f
+#define LIO68XX_LED_CTRL_CFGOFF 0x100
+#define LIO68XX_LED_BEACON_ADDR 0x3508
+#define LIO68XX_LED_BEACON_CFGON 0x47fd
+#define LIO68XX_LED_BEACON_CFGOFF 0x11fc
+#define VITESSE_PHY_GPIO_DRIVEON 0x1
+#define VITESSE_PHY_GPIO_CFG 0x8
+#define VITESSE_PHY_GPIO_DRIVEOFF 0x4
+#define VITESSE_PHY_GPIO_HIGH 0x2
+#define VITESSE_PHY_GPIO_LOW 0x3
+
+struct oct_mdio_cmd {
+ u64 op;
+ u64 mdio_addr;
+ u64 value1;
+ u64 value2;
+ u64 value3;
+};
+
+#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats))
+
+#define LIO_INTRMOD_CHECK_INTERVAL 1
+#define LIO_INTRMOD_MAXPKT_RATETHR 196608 /* max pkt rate threshold */
+#define LIO_INTRMOD_MINPKT_RATETHR 9216 /* min pkt rate threshold */
+#define LIO_INTRMOD_MAXCNT_TRIGGER 384 /* max pkts to trigger interrupt */
+#define LIO_INTRMOD_MINCNT_TRIGGER 1 /* min pkts to trigger interrupt */
+#define LIO_INTRMOD_MAXTMR_TRIGGER 128 /* max time to trigger interrupt */
+#define LIO_INTRMOD_MINTMR_TRIGGER 32 /* min time to trigger interrupt */
+
+struct oct_intrmod_cfg {
+ u64 intrmod_enable;
+ u64 intrmod_check_intrvl;
+ u64 intrmod_maxpkt_ratethr;
+ u64 intrmod_minpkt_ratethr;
+ u64 intrmod_maxcnt_trigger;
+ u64 intrmod_maxtmr_trigger;
+ u64 intrmod_mincnt_trigger;
+ u64 intrmod_mintmr_trigger;
+};
+
+#define BASE_QUEUE_NOT_REQUESTED 65535
+
+union oct_nic_if_cfg {
+ u64 u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 base_queue:16;
+ u64 num_iqueues:16;
+ u64 num_oqueues:16;
+ u64 gmx_port_id:8;
+ u64 reserved:8;
+#else
+ u64 reserved:8;
+ u64 gmx_port_id:8;
+ u64 num_oqueues:16;
+ u64 num_iqueues:16;
+ u64 base_queue:16;
+#endif
+ } s;
+};
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
new file mode 100644
index 000000000000..93819bd8602b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
@@ -0,0 +1,57 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#ifndef _LIQUIDIO_IMAGE_H_
+#define _LIQUIDIO_IMAGE_H_
+
+#define LIO_MAX_FW_TYPE_LEN (8)
+#define LIO_MAX_FW_FILENAME_LEN (256)
+#define LIO_FW_DIR "liquidio/"
+#define LIO_FW_BASE_NAME "lio_"
+#define LIO_FW_NAME_SUFFIX ".bin"
+#define LIO_FW_NAME_TYPE_NIC "nic"
+#define LIO_FW_NAME_TYPE_NONE "none"
+#define LIO_MAX_FIRMWARE_VERSION_LEN 16
+
+#define LIO_MAX_BOOTCMD_LEN 1024
+#define LIO_MAX_IMAGES 16
+#define LIO_NIC_MAGIC 0x434E4943 /* "CNIC" */
+struct octeon_firmware_desc {
+ __be64 addr;
+ __be32 len;
+ __be32 crc32; /* crc32 of image */
+};
+
+/* Following the header is a list of 64-bit aligned binary images,
+ * as described by the desc field.
+ * Numeric fields are in network byte order.
+ */
+struct octeon_firmware_file_header {
+ __be32 magic;
+ char version[LIO_MAX_FIRMWARE_VERSION_LEN];
+ char bootcmd[LIO_MAX_BOOTCMD_LEN];
+ __be32 num_images;
+ struct octeon_firmware_desc desc[LIO_MAX_IMAGES];
+ __be32 pad;
+ __be32 crc32; /* header checksum */
+};
+
+#endif /* _LIQUIDIO_IMAGE_H_ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
new file mode 100644
index 000000000000..62a8dd5cd3dc
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -0,0 +1,424 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file octeon_config.h
+ * \brief Host Driver: Configuration data structures for the host driver.
+ */
+
+#ifndef __OCTEON_CONFIG_H__
+#define __OCTEON_CONFIG_H__
+
+/*--------------------------CONFIG VALUES------------------------*/
+
+/* The following macros affect the way the driver data structures
+ * are generated for Octeon devices.
+ * They can be modified.
+ */
+
+/* Maximum number of Octeon devices; defined as MAX_OCTEON_NICIF to
+ * support up to MAX_OCTEON_NICIF miniports.
+ */
+#define MAX_OCTEON_NICIF 32
+#define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF
+#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF
+#define MAX_OCTEON_MULTICAST_ADDR 32
+
+/* CN6xxx IQ configuration macros */
+#define CN6XXX_MAX_INPUT_QUEUES 32
+#define CN6XXX_MAX_IQ_DESCRIPTORS 2048
+#define CN6XXX_DB_MIN 1
+#define CN6XXX_DB_MAX 8
+#define CN6XXX_DB_TIMEOUT 1
+
+/* CN6xxx OQ configuration macros */
+#define CN6XXX_MAX_OUTPUT_QUEUES 32
+#define CN6XXX_MAX_OQ_DESCRIPTORS 2048
+#define CN6XXX_OQ_BUF_SIZE 1536
+#define CN6XXX_OQ_PKTSPER_INTR ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
+ (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
+#define CN6XXX_OQ_REFIL_THRESHOLD ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
+ (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
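+
+/* With the values above (2048 >= 512), both CN6XXX_OQ_PKTSPER_INTR and
+ * CN6XXX_OQ_REFIL_THRESHOLD evaluate to 128.
+ */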
+
+#define CN6XXX_OQ_INTR_PKT 64
+#define CN6XXX_OQ_INTR_TIME 100
+#define DEFAULT_NUM_NIC_PORTS_66XX 2
+#define DEFAULT_NUM_NIC_PORTS_68XX 4
+#define DEFAULT_NUM_NIC_PORTS_68XX_210NV 2
+
+/* common OCTEON configuration macros */
+#define CN6XXX_CFG_IO_QUEUES 32
+#define OCTEON_32BYTE_INSTR 32
+#define OCTEON_64BYTE_INSTR 64
+#define OCTEON_MAX_BASE_IOQ 4
+#define OCTEON_OQ_BUFPTR_MODE 0
+#define OCTEON_OQ_INFOPTR_MODE 1
+
+#define OCTEON_DMA_INTR_PKT 64
+#define OCTEON_DMA_INTR_TIME 1000
+
+#define MAX_TXQS_PER_INTF 8
+#define MAX_RXQS_PER_INTF 8
+#define DEF_TXQS_PER_INTF 4
+#define DEF_RXQS_PER_INTF 4
+
+#define INVALID_IOQ_NO 0xff
+
+#define DEFAULT_POW_GRP 0
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_MAX_Q(cfg) ((cfg)->iq.max_iqs)
+#define CFG_GET_IQ_PENDING_LIST_SIZE(cfg) ((cfg)->iq.pending_list_size)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout)
+
+#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs)
+#define CFG_GET_OQ_INFO_PTR(cfg) ((cfg)->oq.info_ptr)
+#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_SET_OQ_INTR_PKT(cfg, val) (cfg)->oq.oq_intr_pkt = val
+#define CFG_SET_OQ_INTR_TIME(cfg, val) (cfg)->oq.oq_intr_time = val
+
+#define CFG_GET_DMA_INTR_PKT(cfg) ((cfg)->dma.dma_intr_pkt)
+#define CFG_GET_DMA_INTR_TIME(cfg) ((cfg)->dma.dma_intr_time)
+#define CFG_GET_NUM_NIC_PORTS(cfg) ((cfg)->num_nic_ports)
+#define CFG_GET_NUM_DEF_TX_DESCS(cfg) ((cfg)->num_def_tx_descs)
+#define CFG_GET_NUM_DEF_RX_DESCS(cfg) ((cfg)->num_def_rx_descs)
+#define CFG_GET_DEF_RX_BUF_SIZE(cfg) ((cfg)->def_rx_buf_size)
+
+#define CFG_GET_MAX_TXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].max_txqs)
+#define CFG_GET_NUM_TXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_txqs)
+#define CFG_GET_MAX_RXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].max_rxqs)
+#define CFG_GET_NUM_RXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_rxqs)
+#define CFG_GET_NUM_RX_DESCS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_rx_descs)
+#define CFG_GET_NUM_TX_DESCS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_tx_descs)
+#define CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].rx_buf_size)
+#define CFG_GET_BASE_QUE_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].base_queue)
+#define CFG_GET_GMXID_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].gmx_port_id)
+
+#define CFG_GET_CTRL_Q_GRP(cfg) ((cfg)->misc.ctrlq_grp)
+#define CFG_GET_HOST_LINK_QUERY_INTERVAL(cfg) \
+ ((cfg)->misc.host_link_query_interval)
+#define CFG_GET_OCT_LINK_QUERY_INTERVAL(cfg) \
+ ((cfg)->misc.oct_link_query_interval)
+#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp)
+
+/* Max IOQs per OCTEON Link */
+#define MAX_IOQS_PER_NICIF 32
+
+enum lio_card_type {
+ LIO_210SV = 0, /* Two port, 66xx */
+ LIO_210NV, /* Two port, 68xx */
+ LIO_410NV /* Four port, 68xx */
+};
+
+#define LIO_210SV_NAME "210sv"
+#define LIO_210NV_NAME "210nv"
+#define LIO_410NV_NAME "410nv"
+
+/** Structure to define the configuration attributes for each Input queue.
+ * Applicable to all Octeon processors
+ **/
+struct octeon_iq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:32;
+
+ /** Minimum ticks to wait before checking for pending instructions. */
+ u64 db_timeout:16;
+
+ /** Minimum number of commands pending to be posted to Octeon
+ * before driver hits the Input queue doorbell.
+ */
+ u64 db_min:8;
+
+ /** Command size - 32 or 64 bytes */
+ u64 instr_type:32;
+
+ /** Pending list size (usually set to the sum of the size of all Input
+ * queues)
+ */
+ u64 pending_list_size:32;
+
+ /* Max number of IQs available */
+ u64 max_iqs:8;
+#else
+ /* Max number of IQs available */
+ u64 max_iqs:8;
+
+ /** Pending list size (usually set to the sum of the size of all Input
+ * queues)
+ */
+ u64 pending_list_size:32;
+
+ /** Command size - 32 or 64 bytes */
+ u64 instr_type:32;
+
+ /** Minimum number of commands pending to be posted to Octeon
+ * before driver hits the Input queue doorbell.
+ */
+ u64 db_min:8;
+
+ /** Minimum ticks to wait before checking for pending instructions. */
+ u64 db_timeout:16;
+
+ u64 reserved:32;
+#endif
+};
+
+/** Structure to define the configuration attributes for each Output queue.
+ * Applicable to all Octeon processors
+ **/
+struct octeon_oq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:16;
+
+ u64 pkts_per_intr:16;
+
+ /** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+ * host if at least one packet was sent in the time interval specified
+ * by this field. The driver uses time interval interrupt coalescing
+ * by default. The time is specified in microseconds.
+ */
+ u64 oq_intr_time:16;
+
+ /** Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field. The
+ * driver usually does not use packet count interrupt coalescing.
+ */
+ u64 oq_intr_pkt:16;
+
+ /** The number of buffers consumed during packet processing on this
+ * Output queue before the driver attempts to replenish the
+ * descriptor ring with new buffers.
+ */
+ u64 refill_threshold:16;
+
+ /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ u64 info_ptr:32;
+
+ /* Max number of OQs available */
+ u64 max_oqs:8;
+
+#else
+ /* Max number of OQs available */
+ u64 max_oqs:8;
+
+ /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ u64 info_ptr:32;
+
+ /** The number of buffers consumed during packet processing on this
+ * Output queue before the driver attempts to replenish the
+ * descriptor ring with new buffers.
+ */
+ u64 refill_threshold:16;
+
+ /** Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field. The
+ * driver usually does not use packet count interrupt coalescing.
+ */
+ u64 oq_intr_pkt:16;
+
+ /** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+ * host if at least one packet was sent in the time interval specified
+ * by this field. The driver uses time interval interrupt coalescing
+ * by default. The time is specified in microseconds.
+ */
+ u64 oq_intr_time:16;
+
+ u64 pkts_per_intr:16;
+
+ u64 reserved:16;
+#endif
+
+};
+
+/** This structure contains the NIC link configuration attributes,
+ * common to all Octeon models.
+ */
+struct octeon_nic_if_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:56;
+
+ u64 base_queue:16;
+
+ u64 gmx_port_id:8;
+
+ /* SKB size. The buffer size need not change even for jumbo frames,
+ * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ u64 rx_buf_size:16;
+
+ /* Num of desc for tx rings */
+ u64 num_tx_descs:16;
+
+ /* Num of desc for rx rings */
+ u64 num_rx_descs:16;
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ u64 num_rxqs:16;
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ u64 max_rxqs:16;
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ u64 num_txqs:16;
+
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ u64 max_txqs:16;
+#else
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ u64 max_txqs:16;
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ u64 num_txqs:16;
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ u64 max_rxqs:16;
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ u64 num_rxqs:16;
+
+ /* Num of desc for rx rings */
+ u64 num_rx_descs:16;
+
+ /* Num of desc for tx rings */
+ u64 num_tx_descs:16;
+
+ /* SKB size. The buffer size need not change even for jumbo frames,
+ * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ u64 rx_buf_size:16;
+
+ u64 gmx_port_id:8;
+
+ u64 base_queue:16;
+
+ u64 reserved:56;
+#endif
+
+};
+
+/** Structure to define the configuration attributes for meta data.
+ * Applicable to all Octeon processors.
+ */
+
+struct octeon_misc_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /** Host link status polling period */
+ u64 host_link_query_interval:32;
+ /** Oct link status polling period */
+ u64 oct_link_query_interval:32;
+
+ u64 enable_sli_oq_bp:1;
+ /** Control IQ Group */
+ u64 ctrlq_grp:4;
+#else
+ /** Control IQ Group */
+ u64 ctrlq_grp:4;
+ /** BP for SLI OQ */
+ u64 enable_sli_oq_bp:1;
+ /** Oct link status polling period */
+ u64 oct_link_query_interval:32;
+ /** Host link status polling period */
+ u64 host_link_query_interval:32;
+#endif
+};
+
+/** Structure to define the configuration for all OCTEON processors. */
+struct octeon_config {
+ u16 card_type;
+ char *card_name;
+
+ /** Input Queue attributes. */
+ struct octeon_iq_config iq;
+
+ /** Output Queue attributes. */
+ struct octeon_oq_config oq;
+
+ /** NIC Port Configuration */
+ struct octeon_nic_if_config nic_if_cfg[MAX_OCTEON_NICIF];
+
+ /** Miscellaneous attributes */
+ struct octeon_misc_config misc;
+
+ int num_nic_ports;
+
+ int num_def_tx_descs;
+
+ /* Num of desc for rx rings */
+ int num_def_rx_descs;
+
+ int def_rx_buf_size;
+
+};
+
+/* The following config values are fixed and should not be modified. */
+
+/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */
+#define MAX_BAR1_MAP_INDEX 2
+#define OCTEON_BAR1_ENTRY_SIZE (4 * 1024 * 1024)
+
+/* BAR1 Index 0 to (MAX_BAR1_MAP_INDEX - 1) for normal mapped memory access.
+ * Bar1 register at MAX_BAR1_MAP_INDEX used by driver for dynamic access.
+ */
+#define MAX_BAR1_IOREMAP_SIZE ((MAX_BAR1_MAP_INDEX + 1) * \
+ OCTEON_BAR1_ENTRY_SIZE)
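+
+/* With MAX_BAR1_MAP_INDEX of 2 and 4 MB entries, this maps
+ * (2 + 1) * 4 MB == 12 MB of BAR1 space.
+ */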
+
+/* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking
+ * NoResponse Lists are now maintained with each IQ. (Dec' 2007).
+ */
+#define MAX_RESPONSE_LISTS 4
+
+/* Opcode hash bits. The opcode is hashed on its lower 6 bits to look up the
+ * dispatch table.
+ */
+#define OPCODE_MASK_BITS 6
+
+/* Mask for the 6-bit lookup hash */
+#define OCTEON_OPCODE_MASK 0x3f
+
+/* Size of the dispatch table. The 6-bit hash can index into 2^6 entries */
+#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS)
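+
+/* For example, a combined opcode of 0x47 selects dispatch slot
+ * 0x47 & OCTEON_OPCODE_MASK = 0x07; any other opcode sharing those low
+ * 6 bits chains off that slot's linked list (see octeon_get_dispatch()).
+ */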
+
+/* Maximum number of Octeon Instruction (command) queues */
+#define MAX_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
+
+/* Maximum number of Octeon Output queues */
+#define MAX_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
+
+#endif /* __OCTEON_CONFIG_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
new file mode 100644
index 000000000000..466147e409c9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -0,0 +1,723 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/**
+ * @file octeon_console.c
+ */
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+static void octeon_remote_lock(void);
+static void octeon_remote_unlock(void);
+static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
+ const char *name,
+ u32 flags);
+
+#define MIN(a, b) min((a), (b))
+#define CAST_ULL(v) ((u64)(v))
+
+#define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008
+#define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004
+#define BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR 0x0006c000
+#define BOOTLOADER_PCI_READ_DESC_ADDR 0x0006c100
+#define BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN 248
+
+#define OCTEON_PCI_IO_BUF_OWNER_OCTEON 0x00000001
+#define OCTEON_PCI_IO_BUF_OWNER_HOST 0x00000002
+
+/** Can change without breaking ABI */
+#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
+
+/** minimum alignment of bootmem allocated blocks */
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
+
+/** CVMX bootmem descriptor major version */
+#define CVMX_BOOTMEM_DESC_MAJ_VER 3
+/* CVMX bootmem descriptor minor version */
+#define CVMX_BOOTMEM_DESC_MIN_VER 0
+
+/* Current versions */
+#define OCTEON_PCI_CONSOLE_MAJOR_VERSION 1
+#define OCTEON_PCI_CONSOLE_MINOR_VERSION 0
+#define OCTEON_PCI_CONSOLE_BLOCK_NAME "__pci_console"
+#define OCTEON_CONSOLE_POLL_INTERVAL_MS 100 /* 10 times per second */
+
+/* The first three members of cvmx_bootmem_desc are left in their original
+ * positions for backwards compatibility.
+ * Assumes a big-endian target.
+ */
+struct cvmx_bootmem_desc {
+ /** spinlock to control access to list */
+ u32 lock;
+
+ /** flags for indicating various conditions */
+ u32 flags;
+
+ u64 head_addr;
+
+ /** incremented when incompatible changes are made */
+ u32 major_version;
+
+ /** incremented when compatible changes are made,
+ * reset to zero when the major version is incremented
+ */
+ u32 minor_version;
+
+ u64 app_data_addr;
+ u64 app_data_size;
+
+ /** number of elements in named blocks array */
+ u32 nb_num_blocks;
+
+ /** length of name array in bootmem blocks */
+ u32 named_block_name_len;
+
+ /** address of named memory block descriptors */
+ u64 named_block_array_addr;
+};
+
+/* Structure that defines a single console.
+ *
+ * Note: when read_index == write_index, the buffer is empty.
+ * The actual usable size of each console is buf_size - 1.
+ */
+struct octeon_pci_console {
+ u64 input_base_addr;
+ u32 input_read_index;
+ u32 input_write_index;
+ u64 output_base_addr;
+ u32 output_read_index;
+ u32 output_write_index;
+ u32 lock;
+ u32 buf_size;
+};
+
+/* This is the main container structure that holds all the information
+ * about all PCI consoles. The address of this structure is passed to
+ * various routines that operate on PCI consoles.
+ */
+struct octeon_pci_console_desc {
+ u32 major_version;
+ u32 minor_version;
+ u32 lock;
+ u32 flags;
+ u32 num_consoles;
+ u32 pad;
+ /* must be 64 bit aligned here... */
+ /* Array of addresses of octeon_pci_console structures */
+ u64 console_addr_array[0];
+ /* Implicit storage for console_addr_array */
+};
+
+/**
+ * This macro returns the size of a member of a structure.
+ * Logically it is the same as "sizeof(s::field)" in C++, but
+ * C lacks the "::" operator.
+ */
+#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
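+
+/* For example, SIZEOF_FIELD(struct cvmx_bootmem_desc, minor_version)
+ * evaluates to sizeof(u32) == 4 without needing an instance of the
+ * structure.
+ */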
+
+/**
+ * This macro returns a member of the cvmx_bootmem_desc
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the cvmx_bootmem_desc to read.
+ * Regardless of the type of the field, the return type is always
+ * a u64.
+ */
+#define CVMX_BOOTMEM_DESC_GET_FIELD(oct, field) \
+ __cvmx_bootmem_desc_get(oct, oct->bootmem_desc_addr, \
+ offsetof(struct cvmx_bootmem_desc, field), \
+ SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
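+
+/* Typical use (see __cvmx_bootmem_check_version() below):
+ *
+ *	u32 maj = (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, major_version);
+ *
+ * which reads the 4 bytes at bootmem_desc_addr + offsetof(struct
+ * cvmx_bootmem_desc, major_version) across the PCI window.
+ */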
+
+#define __cvmx_bootmem_lock(flags)
+#define __cvmx_bootmem_unlock(flags)
+
+/**
+ * This macro returns a member of the
+ * cvmx_bootmem_named_block_desc structure. These members can't
+ * be directly addressed as they might be in memory not directly
+ * reachable. In the case where bootmem is compiled with
+ * LINUX_HOST, the structure itself might be located on a remote
+ * Octeon. The argument "field" is the member name of the
+ * cvmx_bootmem_named_block_desc to read. Regardless of the type
+ * of the field, the return type is always a u64. The "addr"
+ * parameter is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_GET_FIELD(oct, addr, field) \
+ __cvmx_bootmem_desc_get(oct, addr, \
+ offsetof(struct cvmx_bootmem_named_block_desc, field), \
+ SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+
+/**
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to read only the needed memory.
+ *
+ * @param oct Pointer to current octeon device
+ * @param base 64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ * accessed.
+ * @param size Size of the structure member.
+ *
+ * @return Value of the structure member promoted into a u64.
+ */
+static inline u64 __cvmx_bootmem_desc_get(struct octeon_device *oct,
+ u64 base,
+ u32 offset,
+ u32 size)
+{
+ base = (1ull << 63) | (base + offset);
+ switch (size) {
+ case 4:
+ return octeon_read_device_mem32(oct, base);
+ case 8:
+ return octeon_read_device_mem64(oct, base);
+ default:
+ return 0;
+ }
+}
+
+/**
+ * This function retrieves the string name of a named block. It is
+ * more complicated than a simple memcpy() since the named block
+ * descriptor may not be directly accessible.
+ *
+ * @param addr Physical address of the named block descriptor
+ * @param str String to receive the named block string name
+ * @param len Length of the name stored in the bootmem descriptor; the
+ * buffer must hold at least len + 1 bytes for the appended NUL.
+ */
+static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
+ u64 addr,
+ char *str,
+ u32 len)
+{
+ addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
+ octeon_pci_read_core_mem(oct, addr, str, len);
+ str[len] = 0;
+}
+
+/* See header file for descriptions of functions */
+
+/**
+ * Check the version information on the bootmem descriptor
+ *
+ * @param exact_match
+ * Exact major version to check against. A zero means
+ * check that the version supports named blocks.
+ *
+ * @return Zero if the version is correct. Negative if the version is
+ * incorrect. Failures also cause a message to be displayed.
+ */
+static int __cvmx_bootmem_check_version(struct octeon_device *oct,
+ u32 exact_match)
+{
+ u32 major_version;
+ u32 minor_version;
+
+ if (!oct->bootmem_desc_addr)
+ oct->bootmem_desc_addr =
+ octeon_read_device_mem64(oct,
+ BOOTLOADER_PCI_READ_DESC_ADDR);
+ major_version =
+ (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, major_version);
+ minor_version =
+ (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, minor_version);
+ dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__,
+ major_version);
+ if ((major_version > 3) ||
+ (exact_match && major_version != exact_match)) {
+ dev_err(&oct->pci_dev->dev, "bootmem ver mismatch %d.%d addr:0x%llx\n",
+ major_version, minor_version,
+ CAST_ULL(oct->bootmem_desc_addr));
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static const struct cvmx_bootmem_named_block_desc
+*__cvmx_bootmem_find_named_block_flags(struct octeon_device *oct,
+ const char *name, u32 flags)
+{
+ struct cvmx_bootmem_named_block_desc *desc =
+ &oct->bootmem_named_block_desc;
+ u64 named_addr = cvmx_bootmem_phy_named_block_find(oct, name, flags);
+
+ if (named_addr) {
+ desc->base_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr,
+ base_addr);
+ desc->size =
+ CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr, size);
+ strncpy(desc->name, name, sizeof(desc->name));
+ desc->name[sizeof(desc->name) - 1] = 0;
+ return &oct->bootmem_named_block_desc;
+ } else {
+ return NULL;
+ }
+}
+
+static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
+ const char *name,
+ u32 flags)
+{
+ u64 result = 0;
+
+ __cvmx_bootmem_lock(flags);
+ if (!__cvmx_bootmem_check_version(oct, 3)) {
+ u32 i;
+ u64 named_block_array_addr =
+ CVMX_BOOTMEM_DESC_GET_FIELD(oct,
+ named_block_array_addr);
+ u32 num_blocks = (u32)
+ CVMX_BOOTMEM_DESC_GET_FIELD(oct, nb_num_blocks);
+ u32 name_length = (u32)
+ CVMX_BOOTMEM_DESC_GET_FIELD(oct, named_block_name_len);
+ u64 named_addr = named_block_array_addr;
+
+ for (i = 0; i < num_blocks; i++) {
+ u64 named_size =
+ CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr,
+ size);
+ if (name && named_size) {
+ char *name_tmp =
+ kmalloc(name_length + 1, GFP_KERNEL);
+ if (!name_tmp)
+ break;
+ CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr,
+ name_tmp,
+ name_length);
+ if (!strncmp(name, name_tmp, name_length)) {
+ result = named_addr;
+ kfree(name_tmp);
+ break;
+ }
+ kfree(name_tmp);
+ } else if (!name && !named_size) {
+ result = named_addr;
+ break;
+ }
+
+ named_addr +=
+ sizeof(struct cvmx_bootmem_named_block_desc);
+ }
+ }
+ __cvmx_bootmem_unlock(flags);
+ return result;
+}
+
+/**
+ * Find a named block on the remote Octeon
+ *
+ * @param name Name of block to find
+ * @param base_addr Address the block is at (OUTPUT)
+ * @param size The size of the block (OUTPUT)
+ *
+ * @return Zero on success, One on failure.
+ */
+static int octeon_named_block_find(struct octeon_device *oct, const char *name,
+ u64 *base_addr, u64 *size)
+{
+ const struct cvmx_bootmem_named_block_desc *named_block;
+
+ octeon_remote_lock();
+ named_block = __cvmx_bootmem_find_named_block_flags(oct, name, 0);
+ octeon_remote_unlock();
+ if (named_block) {
+ *base_addr = named_block->base_addr;
+ *size = named_block->size;
+ return 0;
+ }
+ return 1;
+}
+
+static void octeon_remote_lock(void)
+{
+ /* fill this in if any sharing is needed */
+}
+
+static void octeon_remote_unlock(void)
+{
+ /* fill this in if any sharing is needed */
+}
+
+int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
+ u32 wait_hundredths)
+{
+ u32 len = strlen(cmd_str);
+
+ dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str);
+
+ if (len > BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1) {
+ dev_err(&oct->pci_dev->dev, "Command string too long, max length is: %d\n",
+ BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1);
+ return -1;
+ }
+
+ if (octeon_wait_for_bootloader(oct, wait_hundredths) != 0) {
+ dev_err(&oct->pci_dev->dev, "Bootloader not ready for command.\n");
+ return -1;
+ }
+
+ /* Write command to bootloader */
+ octeon_remote_lock();
+ octeon_pci_write_core_mem(oct, BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR,
+ (u8 *)cmd_str, len);
+ octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR,
+ len);
+ octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR,
+ OCTEON_PCI_IO_BUF_OWNER_OCTEON);
+
+ /* Bootloader should accept command very quickly
+ * if it really was ready
+ */
+ if (octeon_wait_for_bootloader(oct, 200) != 0) {
+ octeon_remote_unlock();
+ dev_err(&oct->pci_dev->dev, "Bootloader did not accept command.\n");
+ return -1;
+ }
+ octeon_remote_unlock();
+ return 0;
+}
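+
+/* Example use (see octeon_download_firmware() in octeon_device.c): the
+ * boot command from the firmware header is sent with a 50-hundredths
+ * (500 ms) bootloader wait:
+ *
+ *	ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
+ */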
+
+int octeon_wait_for_bootloader(struct octeon_device *oct,
+ u32 wait_time_hundredths)
+{
+ dev_dbg(&oct->pci_dev->dev, "waiting %d0 ms for bootloader\n",
+ wait_time_hundredths);
+
+ if (octeon_mem_access_ok(oct))
+ return -1;
+
+ while (wait_time_hundredths > 0 &&
+ octeon_read_device_mem32(oct,
+ BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR)
+ != OCTEON_PCI_IO_BUF_OWNER_HOST) {
+ if (--wait_time_hundredths <= 0)
+ return -1;
+ schedule_timeout_uninterruptible(HZ / 100);
+ }
+ return 0;
+}
+
+static void octeon_console_handle_result(struct octeon_device *oct,
+ size_t console_num,
+ char *buffer, s32 bytes_read)
+{
+ struct octeon_console *console;
+
+ console = &oct->console[console_num];
+
+ console->waiting = 0;
+}
+
+static char console_buffer[OCTEON_CONSOLE_MAX_READ_BYTES];
+
+static void output_console_line(struct octeon_device *oct,
+ struct octeon_console *console,
+ size_t console_num,
+ char *console_buffer,
+ s32 bytes_read)
+{
+ char *line;
+ s32 i;
+
+ line = console_buffer;
+ for (i = 0; i < bytes_read; i++) {
+ /* Output a line at a time, prefixed */
+ if (console_buffer[i] == '\n') {
+ console_buffer[i] = '\0';
+ if (console->leftover[0]) {
+ dev_info(&oct->pci_dev->dev, "%lu: %s%s\n",
+ console_num, console->leftover,
+ line);
+ console->leftover[0] = '\0';
+ } else {
+ dev_info(&oct->pci_dev->dev, "%lu: %s\n",
+ console_num, line);
+ }
+ line = &console_buffer[i + 1];
+ }
+ }
+
+ /* Save off any leftovers */
+ if (line != &console_buffer[bytes_read]) {
+ console_buffer[bytes_read] = '\0';
+ strlcpy(console->leftover, line, sizeof(console->leftover));
+ }
+}
+
+static void check_console(struct work_struct *work)
+{
+ s32 bytes_read, tries, total_read;
+ struct octeon_console *console;
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ size_t console_num = wk->ctxul;
+ u32 delay;
+
+ console = &oct->console[console_num];
+ tries = 0;
+ total_read = 0;
+
+ do {
+ /* Take console output regardless of whether it will
+ * be logged
+ */
+ bytes_read =
+ octeon_console_read(oct, console_num, console_buffer,
+ sizeof(console_buffer) - 1, 0);
+ if (bytes_read > 0) {
+ total_read += bytes_read;
+ if (console->waiting) {
+ octeon_console_handle_result(oct, console_num,
+ console_buffer,
+ bytes_read);
+ }
+ if (octeon_console_debug_enabled(console_num)) {
+ output_console_line(oct, console, console_num,
+ console_buffer, bytes_read);
+ }
+ } else if (bytes_read < 0) {
+ dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n",
+ console_num, bytes_read);
+ }
+
+ tries++;
+ } while ((bytes_read > 0) && (tries < 16));
+
+ /* If nothing is read after polling the console,
+ * output any leftovers if any
+ */
+ if (octeon_console_debug_enabled(console_num) &&
+ (total_read == 0) && (console->leftover[0])) {
+ dev_info(&oct->pci_dev->dev, "%lu: %s\n",
+ console_num, console->leftover);
+ console->leftover[0] = '\0';
+ }
+
+ delay = OCTEON_CONSOLE_POLL_INTERVAL_MS;
+
+ schedule_delayed_work(&wk->work, msecs_to_jiffies(delay));
+}
+
+int octeon_init_consoles(struct octeon_device *oct)
+{
+ int ret = 0;
+ u64 addr, size;
+
+ ret = octeon_mem_access_ok(oct);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Memory access not okay'\n");
+ return ret;
+ }
+
+ ret = octeon_named_block_find(oct, OCTEON_PCI_CONSOLE_BLOCK_NAME, &addr,
+ &size);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Could not find console '%s'\n",
+ OCTEON_PCI_CONSOLE_BLOCK_NAME);
+ return ret;
+ }
+
+ /* A num_consoles value greater than zero indicates that the
+ * consoles are accessible.
+ */
+ oct->num_consoles = octeon_read_device_mem32(oct,
+ addr + offsetof(struct octeon_pci_console_desc,
+ num_consoles));
+ oct->console_desc_addr = addr;
+
+ dev_dbg(&oct->pci_dev->dev, "Initialized consoles. %d available\n",
+ oct->num_consoles);
+
+ return ret;
+}
+
+int octeon_add_console(struct octeon_device *oct, u32 console_num)
+{
+ int ret = 0;
+ u32 delay;
+ u64 coreaddr;
+ struct delayed_work *work;
+ struct octeon_console *console;
+
+ if (console_num >= oct->num_consoles) {
+ dev_err(&oct->pci_dev->dev,
+ "trying to read from console number %d when only 0 to %d exist\n",
+ console_num, oct->num_consoles);
+ } else {
+ console = &oct->console[console_num];
+
+ console->waiting = 0;
+
+ coreaddr = oct->console_desc_addr + console_num * 8 +
+ offsetof(struct octeon_pci_console_desc,
+ console_addr_array);
+ console->addr = octeon_read_device_mem64(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ buf_size);
+ console->buffer_size = octeon_read_device_mem32(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ input_base_addr);
+ console->input_base_addr =
+ octeon_read_device_mem64(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ output_base_addr);
+ console->output_base_addr =
+ octeon_read_device_mem64(oct, coreaddr);
+ console->leftover[0] = '\0';
+
+ work = &oct->console_poll_work[console_num].work;
+
+ INIT_DELAYED_WORK(work, check_console);
+ oct->console_poll_work[console_num].ctxptr = (void *)oct;
+ oct->console_poll_work[console_num].ctxul = console_num;
+ delay = OCTEON_CONSOLE_POLL_INTERVAL_MS;
+ schedule_delayed_work(work, msecs_to_jiffies(delay));
+
+ if (octeon_console_debug_enabled(console_num)) {
+ ret = octeon_console_send_cmd(oct,
+ "setenv pci_console_active 1",
+ 2000);
+ }
+
+ console->active = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * Removes all consoles
+ *
+ * @param oct octeon device
+ */
+void octeon_remove_consoles(struct octeon_device *oct)
+{
+ u32 i;
+ struct octeon_console *console;
+
+ for (i = 0; i < oct->num_consoles; i++) {
+ console = &oct->console[i];
+
+ if (!console->active)
+ continue;
+
+ cancel_delayed_work_sync(&oct->console_poll_work[i].
+ work);
+ console->addr = 0;
+ console->buffer_size = 0;
+ console->input_base_addr = 0;
+ console->output_base_addr = 0;
+ }
+
+ oct->num_consoles = 0;
+}
+
+static inline int octeon_console_free_bytes(u32 buffer_size,
+ u32 wr_idx,
+ u32 rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+
+ return ((buffer_size - 1) - (wr_idx - rd_idx)) % buffer_size;
+}
+
+static inline int octeon_console_avail_bytes(u32 buffer_size,
+ u32 wr_idx,
+ u32 rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+
+ return buffer_size - 1 -
+ octeon_console_free_bytes(buffer_size, wr_idx, rd_idx);
+}
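+
+/* Worked example for the two helpers above, with buffer_size = 1024:
+ * wr_idx = 200, rd_idx = 100 gives free = (1023 - 100) % 1024 = 923 and
+ * avail = 1023 - 923 = 100 bytes ready to read. With wr_idx = 10 and
+ * rd_idx = 1000 (the writer has wrapped) the unsigned arithmetic still
+ * works out: avail = 34, free = 989. One byte is always sacrificed so
+ * that rd_idx == wr_idx unambiguously means "empty".
+ */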
+
+int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size, u32 flags)
+{
+ int bytes_to_read;
+ u32 rd_idx, wr_idx;
+ struct octeon_console *console;
+
+ if (console_num >= oct->num_consoles) {
+ dev_err(&oct->pci_dev->dev, "Attempted to read from disabled console %d\n",
+ console_num);
+ return 0;
+ }
+
+ console = &oct->console[console_num];
+
+ /* Check to see if any data is available.
+ * Maybe optimize this with 64-bit read.
+ */
+ rd_idx = octeon_read_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console, output_read_index));
+ wr_idx = octeon_read_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console, output_write_index));
+
+ bytes_to_read = octeon_console_avail_bytes(console->buffer_size,
+ wr_idx, rd_idx);
+ if (bytes_to_read <= 0)
+ return bytes_to_read;
+
+ bytes_to_read = MIN(bytes_to_read, (s32)buf_size);
+
+ /* Check to see if what we want to read is not contiguous, and limit
+ * ourselves to the contiguous block
+ */
+ if (rd_idx + bytes_to_read >= console->buffer_size)
+ bytes_to_read = console->buffer_size - rd_idx;
+
+ octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
+ buffer, bytes_to_read);
+ octeon_write_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console,
+ output_read_index),
+ (rd_idx + bytes_to_read) %
+ console->buffer_size);
+
+ return bytes_to_read;
+}
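+
+/* Usage sketch (illustrative only, not part of the driver): drain any
+ * pending output from console 0, assuming octeon_init_consoles() and
+ * octeon_add_console() have already succeeded.
+ */
+#if 0
+static void example_drain_console0(struct octeon_device *oct)
+{
+ char buf[256];
+ int n;
+
+ do {
+ n = octeon_console_read(oct, 0, buf, sizeof(buf) - 1, 0);
+ if (n > 0) {
+ buf[n] = '\0';
+ pr_info("console0: %s", buf);
+ }
+ } while (n > 0);
+}
+#endif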
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
new file mode 100644
index 000000000000..0d3106b464b2
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -0,0 +1,1309 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/crc32.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+/** Default configuration
+ * for the CN66XX OCTEON model (210SV card).
+ */
+static struct octeon_config default_cn66xx_conf = {
+ .card_type = LIO_210SV,
+ .card_name = LIO_210SV_NAME,
+
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+ }
+ ,
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+ }
+ ,
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_66XX,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ /* For ethernet interface 0: Port cfg Attributes */
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size: the buffer size need not change even for jumbo frames,
+ * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size: the buffer size need not change even for jumbo frames,
+ * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+ /* Octeon link status polling interval */
+ .oct_link_query_interval = 100,
+
+ /* Host link status polling interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ }
+ ,
+};
+
+/** Default configuration
+ * for the CN68XX OCTEON model (410NV card).
+ */
+
+static struct octeon_config default_cn68xx_conf = {
+ .card_type = LIO_410NV,
+ .card_name = LIO_410NV_NAME,
+
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+ }
+ ,
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+ }
+ ,
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size: the buffer size need not change even for jumbo frames,
+ * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size: the buffer size need not change even for jumbo frames,
+ * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ .nic_if_cfg[2] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size: the buffer size need not change even for jumbo frames,
+ * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 2,
+ },
+
+ .nic_if_cfg[3] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size: the buffer size need not change even for jumbo frames,
+ * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 3,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+ /* Octeon link status polling interval */
+ .oct_link_query_interval = 100,
+
+ /* Host link status polling interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ }
+ ,
+};
+
+/** Default configuration
+ * for the CN68XX OCTEON model (210NV card).
+ */
+static struct octeon_config default_cn68xx_210nv_conf = {
+ .card_type = LIO_210NV,
+ .card_name = LIO_210NV_NAME,
+
+ /** IQ attributes */
+
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+ }
+ ,
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+ }
+ ,
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX_210NV,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size: the buffer size need not change even for jumbo frames,
+ * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size: the buffer size need not change even for jumbo frames,
+ * since Octeon can send a jumbo frame in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+ /* Octeon link status polling interval */
+ .oct_link_query_interval = 100,
+
+ /* Host link status polling interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ }
+ ,
+};
+
+enum {
+ OCTEON_CONFIG_TYPE_DEFAULT = 0,
+ NUM_OCTEON_CONFS,
+};
+
+static struct octeon_config_ptr {
+ u32 conf_type;
+} oct_conf_info[MAX_OCTEON_DEVICES] = {
+ {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ },
+};
+
+static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
+ "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
+ "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
+ "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
+ "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
+ "INVALID"
+};
+
+static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
+ "BASE", "NIC", "UNKNOWN"};
+
+static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
+static u32 octeon_device_count;
+
+static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
+
+static void oct_set_config_info(int oct_id, int conf_type)
+{
+ if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
+ conf_type = OCTEON_CONFIG_TYPE_DEFAULT;
+ oct_conf_info[oct_id].conf_type = conf_type;
+}
+
+void octeon_init_device_list(int conf_type)
+{
+ int i;
+
+ memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++)
+ oct_set_config_info(i, conf_type);
+}
+
+static void *__retrieve_octeon_config_info(struct octeon_device *oct,
+ u16 card_type)
+{
+ u32 oct_id = oct->octeon_id;
+ void *ret = NULL;
+
+ switch (oct_conf_info[oct_id].conf_type) {
+ case OCTEON_CONFIG_TYPE_DEFAULT:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ ret = (void *)&default_cn66xx_conf;
+ } else if ((oct->chip_id == OCTEON_CN68XX) &&
+ (card_type == LIO_210NV)) {
+ ret = (void *)&default_cn68xx_210nv_conf;
+ } else if ((oct->chip_id == OCTEON_CN68XX) &&
+ (card_type == LIO_410NV)) {
+ ret = (void *)&default_cn68xx_conf;
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
+{
+ switch (oct->chip_id) {
+ case OCTEON_CN66XX:
+ case OCTEON_CN68XX:
+ return lio_validate_cn6xxx_config_info(oct, conf);
+
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
+{
+ void *conf = NULL;
+
+ conf = __retrieve_octeon_config_info(oct, card_type);
+ if (!conf)
+ return NULL;
+
+ if (__verify_octeon_config_info(oct, conf)) {
+ dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
+ return NULL;
+ }
+
+ return conf;
+}
+
+char *lio_get_state_string(atomic_t *state_ptr)
+{
+ s32 istate = (s32)atomic_read(state_ptr);
+
+ if (istate > OCT_DEV_STATES || istate < 0)
+ return oct_dev_state_str[OCT_DEV_STATE_INVALID];
+ return oct_dev_state_str[istate];
+}
+
+static char *get_oct_app_string(u32 app_mode)
+{
+ if (app_mode <= CVM_DRV_APP_END)
+ return oct_dev_app_str[app_mode - CVM_DRV_APP_START];
+ return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
+}
+
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+ size_t size)
+{
+ int ret = 0;
+ u8 *p;
+ u8 *buffer;
+ u32 crc32_result;
+ u64 load_addr;
+ u32 image_len;
+ struct octeon_firmware_file_header *h;
+ u32 i;
+
+ if (size < sizeof(struct octeon_firmware_file_header)) {
+ dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
+ (u32)size,
+ (u32)sizeof(struct octeon_firmware_file_header));
+ return -EINVAL;
+ }
+
+ h = (struct octeon_firmware_file_header *)data;
+
+ if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
+ dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
+ return -EINVAL;
+ }
+
+ crc32_result =
+ crc32(~0, data,
+ sizeof(struct octeon_firmware_file_header) -
+ sizeof(u32)) ^ ~0U;
+ if (crc32_result != be32_to_cpu(h->crc32)) {
+ dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
+ crc32_result, be32_to_cpu(h->crc32));
+ return -EINVAL;
+ }
+
+ if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
+ LIQUIDIO_VERSION, h->version);
+ return -EINVAL;
+ }
+
+ if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
+ dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
+ be32_to_cpu(h->num_images));
+ return -EINVAL;
+ }
+
+ dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
+ h->version);
+
+ buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ memcpy(buffer, data, size);
+
+ p = buffer + sizeof(struct octeon_firmware_file_header);
+
+ /* load all images */
+ for (i = 0; i < be32_to_cpu(h->num_images); i++) {
+ load_addr = be64_to_cpu(h->desc[i].addr);
+ image_len = be32_to_cpu(h->desc[i].len);
+
+ /* validate the image */
+ crc32_result = crc32(~0, p, image_len) ^ ~0U;
+ if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
+ dev_err(&oct->pci_dev->dev,
+ "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
+ i, crc32_result,
+ be32_to_cpu(h->desc[i].crc32));
+ ret = -EINVAL;
+ goto done_downloading;
+ }
+
+ /* download the image */
+ octeon_pci_write_core_mem(oct, load_addr, p, image_len);
+
+ p += image_len;
+ dev_dbg(&oct->pci_dev->dev,
+ "Downloaded image %d (%d bytes) to address 0x%016llx\n",
+ i, image_len, load_addr);
+ }
+
+ /* Invoke the bootcmd */
+ ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
+
+done_downloading:
+ kfree(buffer);
+
+ return ret;
+}
+
+void octeon_free_device_mem(struct octeon_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+ /* could check mask as well */
+ if (oct->droq[i])
+ vfree(oct->droq[i]);
+ }
+
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ /* could check mask as well */
+ if (oct->instr_queue[i])
+ vfree(oct->instr_queue[i]);
+ }
+
+ i = oct->octeon_id;
+ vfree(oct);
+
+ octeon_device[i] = NULL;
+ octeon_device_count--;
+}
+
+static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
+ u32 priv_size)
+{
+ struct octeon_device *oct;
+ u8 *buf = NULL;
+ u32 octdevsize = 0, configsize = 0, size;
+
+ switch (pci_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ configsize = sizeof(struct octeon_cn6xxx);
+ break;
+
+ default:
+ pr_err("%s: Unknown PCI Device: 0x%x\n",
+ __func__,
+ pci_id);
+ return NULL;
+ }
+
+ if (configsize & 0x7)
+ configsize += (8 - (configsize & 0x7));
+
+ octdevsize = sizeof(struct octeon_device);
+ if (octdevsize & 0x7)
+ octdevsize += (8 - (octdevsize & 0x7));
+
+ if (priv_size & 0x7)
+ priv_size += (8 - (priv_size & 0x7));
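+
+ /* For example, a size of 13 has (13 & 0x7) == 5 and becomes
+ * 13 + (8 - 5) = 16, the next multiple of 8, so each sub-region
+ * carved out below stays 8-byte aligned.
+ */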
+
+ size = octdevsize + priv_size + configsize +
+ (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
+
+ buf = vmalloc(size);
+ if (!buf)
+ return NULL;
+
+ memset(buf, 0, size);
+
+ oct = (struct octeon_device *)buf;
+ oct->priv = (void *)(buf + octdevsize);
+ oct->chip = (void *)(buf + octdevsize + priv_size);
+ oct->dispatch.dlist = (struct octeon_dispatch *)
+ (buf + octdevsize + priv_size + configsize);
+
+ return oct;
+}
+
+struct octeon_device *octeon_allocate_device(u32 pci_id,
+ u32 priv_size)
+{
+ u32 oct_idx = 0;
+ struct octeon_device *oct = NULL;
+
+ for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
+ if (!octeon_device[oct_idx])
+ break;
+
+ if (oct_idx == MAX_OCTEON_DEVICES)
+ return NULL;
+
+ oct = octeon_allocate_device_mem(pci_id, priv_size);
+ if (!oct)
+ return NULL;
+
+ spin_lock_init(&oct->pci_win_lock);
+ spin_lock_init(&oct->mem_access_lock);
+
+ octeon_device_count++;
+ octeon_device[oct_idx] = oct;
+
+ oct->octeon_id = oct_idx;
+ snprintf((oct->device_name), sizeof(oct->device_name),
+ "LiquidIO%d", (oct->octeon_id));
+
+ return oct;
+}
+
+int octeon_setup_instr_queues(struct octeon_device *oct)
+{
+ u32 i, num_iqs = 0;
+ u32 num_descs = 0;
+
+ /* this causes queue 0 to be default queue */
+ if (OCTEON_CN6XXX(oct)) {
+ num_iqs = 1;
+ num_descs =
+ CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+ }
+
+ oct->num_iqs = 0;
+
+ for (i = 0; i < num_iqs; i++) {
+ oct->instr_queue[i] =
+ vmalloc(sizeof(struct octeon_instr_queue));
+ if (!oct->instr_queue[i])
+ return 1;
+
+ memset(oct->instr_queue[i], 0,
+ sizeof(struct octeon_instr_queue));
+
+ oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
+ if (octeon_init_instr_queue(oct, i, num_descs))
+ return 1;
+
+ oct->num_iqs++;
+ }
+
+ return 0;
+}
+
+int octeon_setup_output_queues(struct octeon_device *oct)
+{
+ u32 i, num_oqs = 0;
+ u32 num_descs = 0;
+ u32 desc_size = 0;
+
+ /* this causes queue 0 to be default queue */
+ if (OCTEON_CN6XXX(oct)) {
+ /* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
+ num_oqs = 1;
+ num_descs =
+ CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
+ desc_size =
+ CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
+ }
+
+ oct->num_oqs = 0;
+
+ for (i = 0; i < num_oqs; i++) {
+ oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
+ if (!oct->droq[i])
+ return 1;
+
+ memset(oct->droq[i], 0, sizeof(struct octeon_droq));
+
+ if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
+ return 1;
+
+ oct->num_oqs++;
+ }
+
+ return 0;
+}
+
+void octeon_set_io_queues_off(struct octeon_device *oct)
+{
+ /* Disable the i/p and o/p queues for this Octeon. */
+
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+}
+
+void octeon_set_droq_pkt_op(struct octeon_device *oct,
+ u32 q_no,
+ u32 enable)
+{
+ u32 reg_val = 0;
+
+ /* Disable the i/p and o/p queues for this Octeon. */
+ reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+
+ if (enable)
+ reg_val = reg_val | (1 << q_no);
+ else
+ reg_val = reg_val & (~(1 << q_no));
+
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
+}
+
+int octeon_init_dispatch_list(struct octeon_device *oct)
+{
+ u32 i;
+
+ oct->dispatch.count = 0;
+
+ for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
+ oct->dispatch.dlist[i].opcode = 0;
+ INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
+ }
+
+ for (i = 0; i <= REQTYPE_LAST; i++)
+ octeon_register_reqtype_free_fn(oct, i, NULL);
+
+ spin_lock_init(&oct->dispatch.lock);
+
+ return 0;
+}
+
+void octeon_delete_dispatch_list(struct octeon_device *oct)
+{
+ u32 i;
+ struct list_head freelist, *temp, *tmp2;
+
+ INIT_LIST_HEAD(&freelist);
+
+ spin_lock_bh(&oct->dispatch.lock);
+
+ for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
+ struct list_head *dispatch;
+
+ dispatch = &oct->dispatch.dlist[i].list;
+ while (dispatch->next != dispatch) {
+ temp = dispatch->next;
+ list_del(temp);
+ list_add_tail(temp, &freelist);
+ }
+
+ oct->dispatch.dlist[i].opcode = 0;
+ }
+
+ oct->dispatch.count = 0;
+
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ list_for_each_safe(temp, tmp2, &freelist) {
+ list_del(temp);
+ vfree(temp);
+ }
+}
+
+octeon_dispatch_fn_t
+octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
+ u16 subcode)
+{
+ u32 idx;
+ struct list_head *dispatch;
+ octeon_dispatch_fn_t fn = NULL;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&octeon_dev->dispatch.lock);
+
+ if (octeon_dev->dispatch.count == 0) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
+ fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
+ } else {
+ list_for_each(dispatch,
+ &octeon_dev->dispatch.dlist[idx].list) {
+ if (((struct octeon_dispatch *)dispatch)->opcode ==
+ combined_opcode) {
+ fn = ((struct octeon_dispatch *)
+ dispatch)->dispatch_fn;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return fn;
+}
+
+/* octeon_register_dispatch_fn
+ * Parameters:
+ * oct - the octeon device.
+ * opcode - opcode for which driver should call the registered function
+ * subcode - subcode for which driver should call the registered function
+ * fn - The function to call when a packet with "opcode" arrives in
+ * octeon output queues.
+ * fn_arg - The argument to be passed when calling function "fn".
+ * Description:
+ * Registers a function and its argument to be called when a packet
+ * arrives in Octeon output queues with "opcode".
+ * Returns:
+ * Success: 0
+ * Failure: 1
+ * Locks:
+ * No locks are held.
+ */
+int
+octeon_register_dispatch_fn(struct octeon_device *oct,
+ u16 opcode,
+ u16 subcode,
+ octeon_dispatch_fn_t fn, void *fn_arg)
+{
+ u32 idx;
+ octeon_dispatch_fn_t pfn;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&oct->dispatch.lock);
+ /* Add dispatch function to first level of lookup table */
+ if (oct->dispatch.dlist[idx].opcode == 0) {
+ oct->dispatch.dlist[idx].opcode = combined_opcode;
+ oct->dispatch.dlist[idx].dispatch_fn = fn;
+ oct->dispatch.dlist[idx].arg = fn_arg;
+ oct->dispatch.count++;
+ spin_unlock_bh(&oct->dispatch.lock);
+ return 0;
+ }
+
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ /* Check if there was a function already registered for this
+ * opcode/subcode.
+ */
+ pfn = octeon_get_dispatch(oct, opcode, subcode);
+ if (!pfn) {
+ struct octeon_dispatch *dispatch;
+
+ dev_dbg(&oct->pci_dev->dev,
+ "Adding opcode to dispatch list linked list\n");
+ dispatch = (struct octeon_dispatch *)
+ vmalloc(sizeof(struct octeon_dispatch));
+ if (!dispatch) {
+ dev_err(&oct->pci_dev->dev,
+ "No memory to add dispatch function\n");
+ return 1;
+ }
+ dispatch->opcode = combined_opcode;
+ dispatch->dispatch_fn = fn;
+ dispatch->arg = fn_arg;
+
+ /* Add dispatch function to linked list of fn ptrs
+ * at the hashed index.
+ */
+ spin_lock_bh(&oct->dispatch.lock);
+ list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
+ oct->dispatch.count++;
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
+ opcode, subcode);
+ return 1;
+ }
+
+ return 0;
+}
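+
+/* Usage sketch (illustrative only, not part of the driver): register a
+ * handler for a made-up opcode/subcode pair. The handler follows the
+ * shape of octeon_core_drv_init() below, which is a real dispatch handler.
+ */
+#if 0
+static int example_pkt_handler(struct octeon_recv_info *recv_info, void *buf)
+{
+ /* ... consume the packet, then release it as real handlers do ... */
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+static void example_register(struct octeon_device *oct)
+{
+ /* 0x12/0x01 is a hypothetical opcode/subcode pair for illustration */
+ if (octeon_register_dispatch_fn(oct, 0x12, 0x01,
+ example_pkt_handler, oct))
+ dev_err(&oct->pci_dev->dev, "dispatch registration failed\n");
+}
+#endif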
+
+/* octeon_unregister_dispatch_fn
+ * Parameters:
+ * oct - octeon device
+ * opcode - driver should unregister the function for this opcode
+ * subcode - driver should unregister the function for this subcode
+ * Description:
+ * Unregister the function set for this opcode+subcode.
+ * Returns:
+ * Success: 0
+ * Failure: 1
+ * Locks:
+ * No locks are held.
+ */
+int
+octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
+ u16 subcode)
+{
+ int retval = 0;
+ u32 idx;
+ struct list_head *dispatch, *dfree = NULL, *tmp2;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&oct->dispatch.lock);
+
+ if (oct->dispatch.count == 0) {
+ spin_unlock_bh(&oct->dispatch.lock);
+ dev_err(&oct->pci_dev->dev,
+ "No dispatch functions registered for this device\n");
+ return 1;
+ }
+
+ if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
+ dispatch = &oct->dispatch.dlist[idx].list;
+ if (dispatch->next != dispatch) {
+ dispatch = dispatch->next;
+ oct->dispatch.dlist[idx].opcode =
+ ((struct octeon_dispatch *)dispatch)->opcode;
+ oct->dispatch.dlist[idx].dispatch_fn =
+ ((struct octeon_dispatch *)
+ dispatch)->dispatch_fn;
+ oct->dispatch.dlist[idx].arg =
+ ((struct octeon_dispatch *)dispatch)->arg;
+ list_del(dispatch);
+ dfree = dispatch;
+ } else {
+ oct->dispatch.dlist[idx].opcode = 0;
+ oct->dispatch.dlist[idx].dispatch_fn = NULL;
+ oct->dispatch.dlist[idx].arg = NULL;
+ }
+ } else {
+ retval = 1;
+ list_for_each_safe(dispatch, tmp2,
+ &oct->dispatch.dlist[idx].list) {
+ if (((struct octeon_dispatch *)dispatch)->opcode ==
+ combined_opcode) {
+ list_del(dispatch);
+ dfree = dispatch;
+ retval = 0;
+ }
+ }
+ }
+
+ if (!retval)
+ oct->dispatch.count--;
+
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ if (dfree)
+ vfree(dfree);
+
+ return retval;
+}
+
+int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
+{
+ u32 i;
+ char app_name[16];
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ struct octeon_core_setup *cs = NULL;
+ u32 num_nic_ports = 0;
+
+ if (OCTEON_CN6XXX(oct))
+ num_nic_ports =
+ CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));
+
+ if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
+ dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
+ atomic_read(&oct->status));
+ goto core_drv_init_err;
+ }
+
+ strncpy(app_name,
+ get_oct_app_string(
+ (u32)recv_pkt->rh.r_core_drv_init.app_mode),
+ sizeof(app_name) - 1);
+ oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+ if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
+ oct->fw_info.max_nic_ports =
+ (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
+ oct->fw_info.num_gmx_ports =
+ (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
+ }
+
+ if (oct->fw_info.max_nic_ports < num_nic_ports) {
+ dev_err(&oct->pci_dev->dev,
+ "Config has more ports than firmware allows (%d > %d).\n",
+ num_nic_ports, oct->fw_info.max_nic_ports);
+ goto core_drv_init_err;
+ }
+ oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
+ oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+
+ atomic_set(&oct->status, OCT_DEV_CORE_OK);
+
+ cs = &core_setup[oct->octeon_id];
+
+ if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
+ dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
+ (u32)sizeof(*cs),
+ recv_pkt->buffer_size[0]);
+ }
+
+ memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
+ strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
+ strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
+ OCT_SERIAL_LEN);
+
+ octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));
+
+ oct->boardinfo.major = cs->board_rev_major;
+ oct->boardinfo.minor = cs->board_rev_minor;
+
+ dev_info(&oct->pci_dev->dev,
+ "Running %s (%llu Hz)\n",
+ app_name, CVM_CAST64(cs->corefreq));
+
+core_drv_init_err:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
+{
+ if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
+ (oct->io_qmask.iq & (1UL << q_no)))
+ return oct->instr_queue[q_no]->max_count;
+
+ return -1;
+}
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
+{
+ if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
+ (oct->io_qmask.oq & (1UL << q_no)))
+ return oct->droq[q_no]->max_count;
+ return -1;
+}
+
+/* Returns the host-firmware handshake OCTEON-specific configuration */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct)
+{
+ struct octeon_config *default_oct_conf = NULL;
+
+ /* check the OCTEON Device model & return the corresponding octeon
+ * configuration
+ */
+
+ if (OCTEON_CN6XXX(oct)) {
+ default_oct_conf =
+ (struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
+ }
+
+ return default_oct_conf;
+}
+
+/* The scratch register address is the same in all OCT-II and CN70XX models */
+#define CNXX_SLI_SCRATCH1 0x3C0
+
+/** Get the octeon device pointer.
+ * @param octeon_id - The id for which the octeon device pointer is required.
+ * @return Success: Octeon device pointer.
+ * @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id)
+{
+ if (octeon_id >= MAX_OCTEON_DEVICES)
+ return NULL;
+ else
+ return octeon_device[octeon_id];
+}
+
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
+{
+ u64 val64;
+ unsigned long flags;
+ u32 val32, addrhi;
+
+ spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+ /* The windowed read happens when the LSB of the addr is written.
+ * So write MSB first
+ */
+ addrhi = (addr >> 32);
+ if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
+ addrhi |= 0x00060000;
+ writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
+
+ /* Read back to preserve ordering of writes */
+ val32 = readl(oct->reg_list.pci_win_rd_addr_hi);
+
+ writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
+ val32 = readl(oct->reg_list.pci_win_rd_addr_lo);
+
+ val64 = readq(oct->reg_list.pci_win_rd_data);
+
+ spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+
+ return val64;
+}
+
+void lio_pci_writeq(struct octeon_device *oct,
+ u64 val,
+ u64 addr)
+{
+ u32 val32;
+ unsigned long flags;
+
+ spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+ writeq(addr, oct->reg_list.pci_win_wr_addr);
+
+ /* The write happens when the LSB is written. So write MSB first. */
+ writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
+ /* Read the MSB to ensure ordering of writes. */
+ val32 = readl(oct->reg_list.pci_win_wr_data_hi);
+
+ writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);
+
+ spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+}
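+
+/* Example (see octeon_mem_access_ok() below): a windowed CSR read of the
+ * LMC0 reset control register:
+ *
+ *	u64 v = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+ */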
+
+int octeon_mem_access_ok(struct octeon_device *oct)
+{
+ u64 access_okay = 0;
+
+ /* Check to make sure a DDR interface is enabled */
+ u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+
+ access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+
+ return access_okay ? 0 : 1;
+}
+
+int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
+{
+ int ret = 1;
+ u32 ms;
+
+ if (!timeout)
+ return ret;
+
+ while (*timeout == 0)
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
+ ms += HZ / 10) {
+ ret = octeon_mem_access_ok(oct);
+
+ /* wait 100 ms */
+ if (ret)
+ schedule_timeout_uninterruptible(HZ / 10);
+ }
+
+ return ret;
+}
+
+/** Get the octeon id assigned to the octeon device passed as argument.
+ * This function is exported to other modules.
+ * @param dev - octeon device pointer passed as a void *.
+ * @return octeon device id
+ */
+int lio_get_device_id(void *dev)
+{
+ struct octeon_device *octeon_dev = (struct octeon_device *)dev;
+ u32 i;
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++)
+ if (octeon_device[i] == octeon_dev)
+ return octeon_dev->octeon_id;
+ return -1;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
new file mode 100644
index 000000000000..36e1f85df8c4
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -0,0 +1,649 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+
+/*! \file octeon_device.h
+ * \brief Host Driver: This file defines the octeon device structure.
+ */
+
+#ifndef _OCTEON_DEVICE_H_
+#define _OCTEON_DEVICE_H_
+
+/** PCI VendorId Device Id */
+#define OCTEON_CN68XX_PCIID 0x91177d
+#define OCTEON_CN66XX_PCIID 0x92177d
+
+/** The driver identifies chips by these IDs, created by combining the
+ * device ID and the revision ID; where the revision ID is not needed to
+ * distinguish between chips, a value of 0 is used for it.
+ */
+#define OCTEON_CN68XX 0x0091
+#define OCTEON_CN66XX 0x0092
+
+/** Endian-swap modes supported by Octeon. */
+enum octeon_pci_swap_mode {
+ OCTEON_PCI_PASSTHROUGH = 0,
+ OCTEON_PCI_64BIT_SWAP = 1,
+ OCTEON_PCI_32BIT_BYTE_SWAP = 2,
+ OCTEON_PCI_32BIT_LW_SWAP = 3
+};
+
+/*--------------- PCI BAR1 index registers -------------*/
+
+/* BAR1 Mask */
+#define PCI_BAR1_ENABLE_CA 1
+#define PCI_BAR1_ENDIAN_MODE OCTEON_PCI_64BIT_SWAP
+#define PCI_BAR1_ENTRY_VALID 1
+#define PCI_BAR1_MASK ((PCI_BAR1_ENABLE_CA << 3) \
+ | (PCI_BAR1_ENDIAN_MODE << 1) \
+ | PCI_BAR1_ENTRY_VALID)
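With the values above, PCI_BAR1_MASK works out to (1 << 3) | (1 << 1) | 1 = 0xb: cacheable access enabled, 64-bit swap endian mode, and the entry-valid bit set.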
+
+/** Octeon Device state.
+ * Each octeon device goes through each of these states
+ * as it is initialized.
+ */
+#define OCT_DEV_BEGIN_STATE 0x0
+#define OCT_DEV_PCI_MAP_DONE 0x1
+#define OCT_DEV_DISPATCH_INIT_DONE 0x2
+#define OCT_DEV_INSTR_QUEUE_INIT_DONE 0x3
+#define OCT_DEV_SC_BUFF_POOL_INIT_DONE 0x4
+#define OCT_DEV_RESP_LIST_INIT_DONE 0x5
+#define OCT_DEV_DROQ_INIT_DONE 0x6
+#define OCT_DEV_IO_QUEUES_DONE 0x7
+#define OCT_DEV_CONSOLE_INIT_DONE 0x8
+#define OCT_DEV_HOST_OK 0x9
+#define OCT_DEV_CORE_OK 0xa
+#define OCT_DEV_RUNNING 0xb
+#define OCT_DEV_IN_RESET 0xc
+#define OCT_DEV_STATE_INVALID 0xd
+
+#define OCT_DEV_STATES OCT_DEV_STATE_INVALID
+
+/** Octeon Device interrupts
+ * These interrupt bits are set in the int_status field of the
+ * octeon_device structure
+ */
+#define OCT_DEV_INTR_DMA0_FORCE 0x01
+#define OCT_DEV_INTR_DMA1_FORCE 0x02
+#define OCT_DEV_INTR_PKT_DATA 0x04
+
+#define LIO_RESET_SECS (3)
+
+/*---------------------------DISPATCH LIST-------------------------------*/
+
+/** The dispatch list entry.
+ * The driver keeps a record of functions registered for each
+ * response header opcode in this structure. Since the opcode is
+ * hashed to index into the driver's list, more than one opcode
+ * can hash to the same entry, in which case the list field points
+ * to a linked list with the other entries.
+ */
+struct octeon_dispatch {
+ /** List head for this entry */
+ struct list_head list;
+
+ /** The opcode for which the dispatch function & arg should be used */
+ u16 opcode;
+
+ /** The function to be called for a packet received by the driver */
+ octeon_dispatch_fn_t dispatch_fn;
+
+ /* The application specified argument to be passed to the above
+ * function along with the received packet
+ */
+ void *arg;
+};
+
+/** The dispatch list structure. */
+struct octeon_dispatch_list {
+ /** access to dispatch list must be atomic */
+ spinlock_t lock;
+
+ /** Count of dispatch functions currently registered */
+ u32 count;
+
+ /** The list of dispatch functions */
+ struct octeon_dispatch *dlist;
+};
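An opcode/subcode pair is folded into one value with OPCODE_SUBCODE() and masked with OCTEON_OPCODE_MASK to pick a slot in dlist[]; colliding opcodes chain off that slot's list head, as octeon_get_dispatch_arg() in octeon_droq.c below illustrates.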
+
+/*----------------------- THE OCTEON DEVICE ---------------------------*/
+
+#define OCT_MEM_REGIONS 3
+/** PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to different physical address spaces in
+ * the kernel.
+ */
+struct octeon_mmio {
+ /** PCI address to which the BAR is mapped. */
+ u64 start;
+
+ /** Length of this PCI address space. */
+ u32 len;
+
+ /** Length that has been mapped to phys. address space. */
+ u32 mapped_len;
+
+	/** The (ioremapped) kernel virtual address to which the PCI
+	 * address space is mapped.
+	 */
+ u8 __iomem *hw_addr;
+
+ /** Flag indicating the mapping was successful. */
+ u32 done;
+};
+
+#define MAX_OCTEON_MAPS 32
+
+struct octeon_io_enable {
+ u32 iq;
+ u32 oq;
+ u32 iq64B;
+};
+
+struct octeon_reg_list {
+ u32 __iomem *pci_win_wr_addr_hi;
+ u32 __iomem *pci_win_wr_addr_lo;
+ u64 __iomem *pci_win_wr_addr;
+
+ u32 __iomem *pci_win_rd_addr_hi;
+ u32 __iomem *pci_win_rd_addr_lo;
+ u64 __iomem *pci_win_rd_addr;
+
+ u32 __iomem *pci_win_wr_data_hi;
+ u32 __iomem *pci_win_wr_data_lo;
+ u64 __iomem *pci_win_wr_data;
+
+ u32 __iomem *pci_win_rd_data_hi;
+ u32 __iomem *pci_win_rd_data_lo;
+ u64 __iomem *pci_win_rd_data;
+};
+
+#define OCTEON_CONSOLE_MAX_READ_BYTES 512
+struct octeon_console {
+ u32 active;
+ u32 waiting;
+ u64 addr;
+ u32 buffer_size;
+ u64 input_base_addr;
+ u64 output_base_addr;
+ char leftover[OCTEON_CONSOLE_MAX_READ_BYTES];
+};
+
+struct octeon_board_info {
+ char name[OCT_BOARD_NAME];
+ char serial_number[OCT_SERIAL_LEN];
+ u64 major;
+ u64 minor;
+};
+
+struct octeon_fn_list {
+ void (*setup_iq_regs)(struct octeon_device *, u32);
+ void (*setup_oq_regs)(struct octeon_device *, u32);
+
+ irqreturn_t (*process_interrupt_regs)(void *);
+ int (*soft_reset)(struct octeon_device *);
+ int (*setup_device_regs)(struct octeon_device *);
+ void (*reinit_regs)(struct octeon_device *);
+ void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
+ void (*bar1_idx_write)(struct octeon_device *, u32, u32);
+ u32 (*bar1_idx_read)(struct octeon_device *, u32);
+ u32 (*update_iq_read_idx)(struct octeon_device *,
+ struct octeon_instr_queue *);
+
+ void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
+ void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
+
+ void (*enable_interrupt)(void *);
+ void (*disable_interrupt)(void *);
+
+ void (*enable_io_queues)(struct octeon_device *);
+ void (*disable_io_queues)(struct octeon_device *);
+};
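To make the indirection concrete, here is a minimal sketch of how a chip-specific init path might populate this table; the my_66xx_* handlers are placeholder stubs invented for this illustration, not functions defined by this patch.

/* Sketch only: the handler bodies below are illustrative stubs. */
static int my_66xx_soft_reset(struct octeon_device *oct)
{
	return 0;	/* a real handler would reset the chip here */
}

static int my_66xx_setup_device_regs(struct octeon_device *oct)
{
	return 0;	/* a real handler would program chip CSRs here */
}

static int my_setup_cn66xx(struct octeon_device *oct)
{
	oct->fn_list.soft_reset = my_66xx_soft_reset;
	oct->fn_list.setup_device_regs = my_66xx_setup_device_regs;

	/* generic code can then stay chip-agnostic: */
	return oct->fn_list.setup_device_regs(oct);
}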
+
+/* Must be multiple of 8, changing breaks ABI */
+#define CVMX_BOOTMEM_NAME_LEN 128
+
+/* Structure for named memory blocks
+ * Number of descriptors
+ * available can be changed without affecting compatibility,
+ * but name length changes require a bump in the bootmem
+ * descriptor version
+ * Note: This structure must be naturally 64 bit aligned, as a single
+ * memory image will be used by both 32 and 64 bit programs.
+ */
+struct cvmx_bootmem_named_block_desc {
+ /** Base address of named block */
+ u64 base_addr;
+
+ /** Size actually allocated for named block */
+ u64 size;
+
+ /** name of named block */
+ char name[CVMX_BOOTMEM_NAME_LEN];
+};
+
+struct oct_fw_info {
+	u32 max_nic_ports;  /**< max nic ports for the device */
+	u32 num_gmx_ports;  /**< num gmx ports */
+	u64 app_cap_flags;  /**< firmware cap flags */
+
+ /** The core application is running in this mode.
+ * See octeon-drv-opcodes.h for values.
+ */
+ u32 app_mode;
+ char liquidio_firmware_version[32];
+};
+
+/* wrappers around work structs */
+struct cavium_wk {
+ struct delayed_work work;
+ void *ctxptr;
+ size_t ctxul;
+};
+
+struct cavium_wq {
+ struct workqueue_struct *wq;
+ struct cavium_wk wk;
+};
+
+struct octdev_props {
+ /* Each interface in the Octeon device has a network
+ * device pointer (used for OS specific calls).
+ */
+ struct net_device *netdev;
+};
+
+/** The Octeon device.
+ * Each Octeon device has this structure to represent all its
+ * components.
+ */
+struct octeon_device {
+ /** Lock for PCI window configuration accesses */
+ spinlock_t pci_win_lock;
+
+ /** Lock for memory accesses */
+ spinlock_t mem_access_lock;
+
+ /** PCI device pointer */
+ struct pci_dev *pci_dev;
+
+ /** Chip specific information. */
+ void *chip;
+
+ /** Number of interfaces detected in this octeon device. */
+ u32 ifcount;
+
+ struct octdev_props props[MAX_OCTEON_LINKS];
+
+ /** Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /** This device's id - set by the driver. */
+ u32 octeon_id;
+
+ /** This device's PCIe port used for traffic. */
+ u16 pcie_port;
+
+ u16 flags;
+#define LIO_FLAG_MSI_ENABLED (u32)(1 << 1)
+#define LIO_FLAG_MSIX_ENABLED (u32)(1 << 2)
+
+ /** The state of this device */
+ atomic_t status;
+
+ /** memory mapped io range */
+ struct octeon_mmio mmio[OCT_MEM_REGIONS];
+
+ struct octeon_reg_list reg_list;
+
+ struct octeon_fn_list fn_list;
+
+ struct octeon_board_info boardinfo;
+
+ u32 num_iqs;
+
+	/* The pool containing pre-allocated buffers used for soft commands */
+ struct octeon_sc_buffer_pool sc_buf_pool;
+
+ /** The input instruction queues */
+ struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES];
+
+ /** The doubly-linked list of instruction response */
+ struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
+
+ u32 num_oqs;
+
+ /** The DROQ output queues */
+ struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES];
+
+ struct octeon_io_enable io_qmask;
+
+ /** List of dispatch functions */
+ struct octeon_dispatch_list dispatch;
+
+ /* Interrupt Moderation */
+ struct oct_intrmod_cfg intrmod;
+
+ u32 int_status;
+
+ u64 droq_intr;
+
+ /** Physical location of the cvmx_bootmem_desc_t in octeon memory */
+ u64 bootmem_desc_addr;
+
+ /** Placeholder memory for named blocks.
+ * Assumes single-threaded access
+ */
+ struct cvmx_bootmem_named_block_desc bootmem_named_block_desc;
+
+ /** Address of consoles descriptor */
+ u64 console_desc_addr;
+
+ /** Number of consoles available. 0 means they are inaccessible */
+ u32 num_consoles;
+
+ /* Console caches */
+ struct octeon_console console[MAX_OCTEON_MAPS];
+
+ /* Coprocessor clock rate. */
+ u64 coproc_clock_rate;
+
+ /** The core application is running in this mode. See liquidio_common.h
+ * for values.
+ */
+ u32 app_mode;
+
+ struct oct_fw_info fw_info;
+
+ /** The name given to this device. */
+ char device_name[32];
+
+ /** Application Context */
+ void *app_ctx;
+
+ struct cavium_wq dma_comp_wq;
+
+ struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES];
+
+ struct cavium_wk nic_poll_work;
+
+ struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
+
+ void *priv;
+};
+
+#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \
+ (oct->chip_id == OCTEON_CN68XX))
+#define CHIP_FIELD(oct, TYPE, field) \
+ (((struct octeon_ ## TYPE *)(oct->chip))->field)
+
+struct oct_intrmod_cmd {
+ struct octeon_device *oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_intrmod_cfg *cfg;
+};
+
+/*------------------ Function Prototypes ----------------------*/
+
+/** Initialize device list memory */
+void octeon_init_device_list(int conf_type);
+
+/** Free memory for Input and Output queue structures for a octeon device */
+void octeon_free_device_mem(struct octeon_device *);
+
+/* Look up a free entry in the octeon_device table and allocate resources
+ * for the octeon_device structure for an octeon device. Called at init
+ * time.
+ */
+struct octeon_device *octeon_allocate_device(u32 pci_id,
+ u32 priv_size);
+
+/** Initialize the driver's dispatch list which is a mix of a hash table
+ * and a linked list. This is done at driver load time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return 0 on success, else -ve error value
+ */
+int octeon_init_dispatch_list(struct octeon_device *octeon_dev);
+
+/** Delete the driver's dispatch list and all registered entries.
+ * This is done at driver unload time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ */
+void octeon_delete_dispatch_list(struct octeon_device *octeon_dev);
+
+/** Initialize the core device fields with the info returned by the FW.
+ * @param recv_info - Receive info structure
+ * @param buf - Receive buffer
+ */
+int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf);
+
+/** Gets the dispatch function registered to receive packets with a
+ * given opcode/subcode.
+ * @param octeon_dev - the octeon device pointer.
+ * @param opcode - the opcode for which the dispatch function
+ * is to be checked.
+ * @param subcode - the subcode for which the dispatch function
+ * is to be checked.
+ *
+ * @return Success: octeon_dispatch_fn_t (dispatch function pointer)
+ * @return Failure: NULL
+ *
+ * Looks up the dispatch list to get the dispatch function for a
+ * given opcode.
+ */
+octeon_dispatch_fn_t
+octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
+ u16 subcode);
+
+/** Get the octeon device pointer.
+ * @param octeon_id - The id for which the octeon device pointer is required.
+ * @return Success: Octeon device pointer.
+ * @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id);
+
+/** Get the octeon id assigned to the octeon device passed as argument.
+ * This function is exported to other modules.
+ * @param dev - octeon device pointer passed as a void *.
+ * @return octeon device id, or -1 if the device is not found.
+ */
+int lio_get_device_id(void *dev);
+
+static inline u16 OCTEON_MAJOR_REV(struct octeon_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEON_MINOR_REV(struct octeon_device *oct)
+{
+ return oct->rev_id & 0x3;
+}
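For example, a rev_id of 0x6 decodes as major ((0x6 & 0xC) >> 2) = 1 and minor (0x6 & 0x3) = 2, i.e. pass 1.2; a major field that reads 0 is reported as 1.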
+
+/** Read windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to read.
+ *
+ * This routine is called to read from the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return - 64 bit value read from the register.
+ */
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr);
+
+/** Write windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param val - Value to write
+ * @param addr - Address of the register to write
+ *
+ * This routine is called to write to the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return Nothing.
+ */
+void lio_pci_writeq(struct octeon_device *oct, u64 val, u64 addr);
+
+/* Routines for reading and writing CSRs */
+#define octeon_write_csr(oct_dev, reg_off, value) \
+ writel(value, oct_dev->mmio[0].hw_addr + reg_off)
+
+#define octeon_write_csr64(oct_dev, reg_off, val64) \
+ writeq(val64, oct_dev->mmio[0].hw_addr + reg_off)
+
+#define octeon_read_csr(oct_dev, reg_off) \
+ readl(oct_dev->mmio[0].hw_addr + reg_off)
+
+#define octeon_read_csr64(oct_dev, reg_off) \
+ readq(oct_dev->mmio[0].hw_addr + reg_off)
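A minimal read-modify-write sketch using the CSR helpers above (illustrative only, not part of the patch):

/* Set a bit in a BAR0-mapped CSR; reg is any valid CSR offset. */
static void example_set_csr_bit(struct octeon_device *oct, u32 reg, u32 bit)
{
	u64 val = octeon_read_csr64(oct, reg);

	octeon_write_csr64(oct, reg, val | (1ULL << bit));
}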
+
+/**
+ * Checks if memory access is okay
+ *
+ * @param oct the octeon device to check
+ * @return Zero on success, non-zero on failure.
+ */
+int octeon_mem_access_ok(struct octeon_device *oct);
+
+/**
+ * Waits for DDR initialization.
+ *
+ * @param oct the octeon device to wait on
+ * @param timeout_in_ms pointer to how long to wait until DDR is initialized
+ * in ms.
+ * If contents are 0, it waits until contents are non-zero
+ * before starting to check.
+ * @return Zero on success, non-zero on failure.
+ */
+int octeon_wait_for_ddr_init(struct octeon_device *oct,
+ u32 *timeout_in_ms);
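A minimal caller sketch for the DDR wait, assuming a two-second budget picked purely for illustration:

static int example_wait_for_ddr(struct octeon_device *oct)
{
	u32 ddr_timeout_ms = 2000;	/* 0 would block until set non-zero */

	return octeon_wait_for_ddr_init(oct, &ddr_timeout_ms);
}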
+
+/**
+ * Wait for u-boot to boot and be waiting for a command.
+ *
+ * @param wait_time_hundredths
+ * Maximum time to wait
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeon_wait_for_bootloader(struct octeon_device *oct,
+ u32 wait_time_hundredths);
+
+/**
+ * Initialize console access
+ *
+ * @param oct which octeon to initialize
+ * @return Zero on success, negative on failure.
+ */
+int octeon_init_consoles(struct octeon_device *oct);
+
+/**
+ * Adds access to a console to the device.
+ *
+ * @param oct which octeon to add to
+ * @param console_num which console
+ * @return Zero on success, negative on failure.
+ */
+int octeon_add_console(struct octeon_device *oct, u32 console_num);
+
+/** write or read from a console */
+int octeon_console_write(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 write_request_size, u32 flags);
+int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
+int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size, u32 flags);
+int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
+
+/** Removes all attached consoles. */
+void octeon_remove_consoles(struct octeon_device *oct);
+
+/**
+ * Send a string to u-boot on console 0 as a command.
+ *
+ * @param oct which octeon to send to
+ * @param cmd_str String to send
+ * @param wait_hundredths Time to wait for u-boot to accept the command.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
+ u32 wait_hundredths);
+
+/** Parses, validates, and downloads firmware, then boots associated cores.
+ * @param oct which octeon to download firmware to
+ * @param data - The complete firmware file image
+ * @param size - The size of the data
+ *
+ * @return 0 if success.
+ * -EINVAL if file is incompatible or badly formatted.
+ * -ENODEV if no handler was found for the application type or an
+ * invalid octeon id was passed.
+ */
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+ size_t size);
+
+char *lio_get_state_string(atomic_t *state_ptr);
+
+/** Sets up instruction queues for the device
+ * @param oct which octeon to setup
+ *
+ * @return 0 if success. 1 if fails
+ */
+int octeon_setup_instr_queues(struct octeon_device *oct);
+
+/** Sets up output queues for the device
+ * @param oct which octeon to setup
+ *
+ * @return 0 if success. 1 if fails
+ */
+int octeon_setup_output_queues(struct octeon_device *oct);
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no);
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
+
+/** Turns off the input and output queues for the device
+ * @param oct which octeon to disable
+ */
+void octeon_set_io_queues_off(struct octeon_device *oct);
+
+/** Turns on or off the given output queue for the device
+ * @param oct which octeon to change
+ * @param q_no which queue
+ * @param enable 1 to enable, 0 to disable
+ */
+void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable);
+
+/** Retrieve the config for the device
+ * @param oct which octeon
+ * @param card_type type of card
+ *
+ * @returns pointer to configuration
+ */
+void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
+
+/** Gets the octeon device configuration
+ * @return - pointer to the octeon configuration struture
+ */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
new file mode 100644
index 000000000000..94b502a0cf33
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -0,0 +1,989 @@
+/**********************************************************************
+* Author: Cavium, Inc.
+*
+* Contact: support@cavium.com
+* Please include "LiquidIO" in the subject.
+*
+* Copyright (c) 2003-2015 Cavium, Inc.
+*
+* This file is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License, Version 2, as
+* published by the Free Software Foundation.
+*
+* This file is distributed in the hope that it will be useful, but
+* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+* NONINFRINGEMENT. See the GNU General Public License for more
+* details.
+*
+* This file may also be available under a different license from Cavium.
+* Contact Cavium, Inc. for more information
+**********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+/* #define CAVIUM_ONLY_PERF_MODE */
+
+#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
+#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
+
+struct niclist {
+ struct list_head list;
+ void *ptr;
+};
+
+struct __dispatch {
+ struct list_head list;
+ struct octeon_recv_info *rinfo;
+ octeon_dispatch_fn_t disp_fn;
+};
+
+/** Get the argument that the user set when registering dispatch
+ * function for a given opcode/subcode.
+ * @param octeon_dev - the octeon device pointer.
+ * @param opcode - the opcode for which the dispatch argument
+ * is to be checked.
+ * @param subcode - the subcode for which the dispatch argument
+ * is to be checked.
+ * @return Success: void * (argument to the dispatch function)
+ * @return Failure: NULL
+ *
+ */
+static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
+ u16 opcode, u16 subcode)
+{
+ int idx;
+ struct list_head *dispatch;
+ void *fn_arg = NULL;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&octeon_dev->dispatch.lock);
+
+ if (octeon_dev->dispatch.count == 0) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
+ fn_arg = octeon_dev->dispatch.dlist[idx].arg;
+ } else {
+ list_for_each(dispatch,
+ &octeon_dev->dispatch.dlist[idx].list) {
+ if (((struct octeon_dispatch *)dispatch)->opcode ==
+ combined_opcode) {
+ fn_arg = ((struct octeon_dispatch *)
+ dispatch)->arg;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return fn_arg;
+}
+
+u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ u32 pkt_count = 0;
+
+ pkt_count = readl(droq->pkts_sent_reg);
+ if (pkt_count) {
+ atomic_add(pkt_count, &droq->pkts_pending);
+ writel(pkt_count, droq->pkts_sent_reg);
+ }
+
+ return pkt_count;
+}
+
+static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
+{
+ u32 count = 0;
+
+ /* max_empty_descs is the max. no. of descs that can have no buffers.
+ * If the empty desc count goes beyond this value, we cannot safely
+ * read in a 64K packet sent by Octeon
+ * (64K is max pkt size from Octeon)
+ */
+ droq->max_empty_descs = 0;
+
+ do {
+ droq->max_empty_descs++;
+ count += droq->buffer_size;
+ } while (count < (64 * 1024));
+
+ droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
+}
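As a worked example: with 2,048-byte buffers the loop iterates 32 times before count reaches 64 KB, so on a 1,024-entry ring max_empty_descs becomes 1024 - 32 = 992; once more than 992 descriptors sit without buffers, a worst-case 64 KB packet can no longer be received safely.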
+
+static void octeon_droq_reset_indices(struct octeon_droq *droq)
+{
+ droq->read_idx = 0;
+ droq->write_idx = 0;
+ droq->refill_idx = 0;
+ droq->refill_count = 0;
+ atomic_set(&droq->pkts_pending, 0);
+}
+
+static void
+octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ u32 i;
+
+ for (i = 0; i < droq->max_count; i++) {
+ if (droq->recv_buf_list[i].buffer) {
+ if (droq->desc_ring) {
+ lio_unmap_ring_info(oct->pci_dev,
+ (u64)droq->
+ desc_ring[i].info_ptr,
+ OCT_DROQ_INFO_SIZE);
+ lio_unmap_ring(oct->pci_dev,
+ (u64)droq->desc_ring[i].
+ buffer_ptr,
+ droq->buffer_size);
+ }
+ recv_buffer_free(droq->recv_buf_list[i].buffer);
+ droq->recv_buf_list[i].buffer = NULL;
+ }
+ }
+
+ octeon_droq_reset_indices(droq);
+}
+
+static int
+octeon_droq_setup_ring_buffers(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ u32 i;
+ void *buf;
+ struct octeon_droq_desc *desc_ring = droq->desc_ring;
+
+ for (i = 0; i < droq->max_count; i++) {
+ buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size);
+
+ if (!buf) {
+ dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ droq->recv_buf_list[i].buffer = buf;
+ droq->recv_buf_list[i].data = get_rbd(buf);
+
+ droq->info_list[i].length = 0;
+
+ /* map ring buffers into memory */
+ desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
+ desc_ring[i].buffer_ptr =
+ lio_map_ring(oct->pci_dev,
+ droq->recv_buf_list[i].buffer,
+ droq->buffer_size);
+ }
+
+ octeon_droq_reset_indices(droq);
+
+ octeon_droq_compute_max_packet_bufs(droq);
+
+ return 0;
+}
+
+int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
+{
+ struct octeon_droq *droq = oct->droq[q_no];
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
+
+ octeon_droq_destroy_ring_buffers(oct, droq);
+
+ if (droq->recv_buf_list)
+ vfree(droq->recv_buf_list);
+
+ if (droq->info_base_addr)
+ cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
+ droq->info_alloc_size,
+ droq->info_base_addr,
+ droq->info_list_dma);
+
+ if (droq->desc_ring)
+ lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
+ droq->desc_ring, droq->desc_ring_dma);
+
+ memset(droq, 0, OCT_DROQ_SIZE);
+
+ return 0;
+}
+
+int octeon_init_droq(struct octeon_device *oct,
+ u32 q_no,
+ u32 num_descs,
+ u32 desc_size,
+ void *app_ctx)
+{
+ struct octeon_droq *droq;
+ u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
+ u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
+
+ droq = oct->droq[q_no];
+ memset(droq, 0, OCT_DROQ_SIZE);
+
+ droq->oct_dev = oct;
+ droq->q_no = q_no;
+ if (app_ctx)
+ droq->app_ctx = app_ctx;
+ else
+ droq->app_ctx = (void *)(size_t)q_no;
+
+ c_num_descs = num_descs;
+ c_buf_size = desc_size;
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+ c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
+ c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ }
+
+ droq->max_count = c_num_descs;
+ droq->buffer_size = c_buf_size;
+
+ desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
+ droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
+ (dma_addr_t *)&droq->desc_ring_dma);
+
+ if (!droq->desc_ring) {
+ dev_err(&oct->pci_dev->dev,
+ "Output queue %d ring alloc failed\n", q_no);
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
+ q_no, droq->desc_ring, droq->desc_ring_dma);
+ dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
+ droq->max_count);
+
+ droq->info_list =
+ cnnic_alloc_aligned_dma(oct->pci_dev,
+ (droq->max_count * OCT_DROQ_INFO_SIZE),
+ &droq->info_alloc_size,
+ &droq->info_base_addr,
+ &droq->info_list_dma);
+
+ if (!droq->info_list) {
+ dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
+ lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
+ droq->desc_ring, droq->desc_ring_dma);
+ return 1;
+ }
+
+ droq->recv_buf_list = (struct octeon_recv_buffer *)
+ vmalloc(droq->max_count *
+ OCT_DROQ_RECVBUF_SIZE);
+ if (!droq->recv_buf_list) {
+ dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
+ goto init_droq_fail;
+ }
+
+ if (octeon_droq_setup_ring_buffers(oct, droq))
+ goto init_droq_fail;
+
+ droq->pkts_per_intr = c_pkts_per_intr;
+ droq->refill_threshold = c_refill_threshold;
+
+ dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
+ droq->max_empty_descs);
+
+ spin_lock_init(&droq->lock);
+
+ INIT_LIST_HEAD(&droq->dispatch_list);
+
+ /* For 56xx Pass1, this function won't be called, so no checks. */
+ oct->fn_list.setup_oq_regs(oct, q_no);
+
+ oct->io_qmask.oq |= (1 << q_no);
+
+ return 0;
+
+init_droq_fail:
+ octeon_delete_droq(oct, q_no);
+ return 1;
+}
+
+/* octeon_create_recv_info
+ * Parameters:
+ * octeon_dev - pointer to the octeon device structure
+ * droq - droq in which the packet arrived.
+ * buf_cnt - no. of buffers used by the packet.
+ * idx - index in the descriptor for the first buffer in the packet.
+ * Description:
+ * Allocates a struct octeon_recv_info and copies the buffer addresses for
+ * packet data into the recv_pkt space that immediately follows it.
+ * Flags the descriptors for refill later. If available descriptors go
+ * below the threshold to receive a 64K pkt, new buffers are first allocated
+ * before the recv_pkt is created.
+ * This routine will be called in interrupt context.
+ * Returns:
+ * Success: Pointer to recv_info_t
+ * Failure: NULL.
+ * Locks:
+ * The droq->lock is held when this routine is called.
+ */
+static inline struct octeon_recv_info *octeon_create_recv_info(
+ struct octeon_device *octeon_dev,
+ struct octeon_droq *droq,
+ u32 buf_cnt,
+ u32 idx)
+{
+ struct octeon_droq_info *info;
+ struct octeon_recv_pkt *recv_pkt;
+ struct octeon_recv_info *recv_info;
+ u32 i, bytes_left;
+
+ info = &droq->info_list[idx];
+
+ recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
+ if (!recv_info)
+ return NULL;
+
+ recv_pkt = recv_info->recv_pkt;
+ recv_pkt->rh = info->rh;
+ recv_pkt->length = (u32)info->length;
+ recv_pkt->buffer_count = (u16)buf_cnt;
+ recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;
+
+ i = 0;
+ bytes_left = (u32)info->length;
+
+ while (buf_cnt) {
+ lio_unmap_ring(octeon_dev->pci_dev,
+ (u64)droq->desc_ring[idx].buffer_ptr,
+ droq->buffer_size);
+
+ recv_pkt->buffer_size[i] =
+ (bytes_left >=
+ droq->buffer_size) ? droq->buffer_size : bytes_left;
+
+ recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
+ droq->recv_buf_list[idx].buffer = NULL;
+
+ INCR_INDEX_BY1(idx, droq->max_count);
+ bytes_left -= droq->buffer_size;
+ i++;
+ buf_cnt--;
+ }
+
+ return recv_info;
+}
+
+/* If we were not able to refill all buffers, try to move around
+ * the buffers that were not dispatched.
+ */
+static inline u32
+octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
+ struct octeon_droq_desc *desc_ring)
+{
+ u32 desc_refilled = 0;
+
+ u32 refill_index = droq->refill_idx;
+
+ while (refill_index != droq->read_idx) {
+ if (droq->recv_buf_list[refill_index].buffer) {
+ droq->recv_buf_list[droq->refill_idx].buffer =
+ droq->recv_buf_list[refill_index].buffer;
+ droq->recv_buf_list[droq->refill_idx].data =
+ droq->recv_buf_list[refill_index].data;
+ desc_ring[droq->refill_idx].buffer_ptr =
+ desc_ring[refill_index].buffer_ptr;
+ droq->recv_buf_list[refill_index].buffer = NULL;
+ desc_ring[refill_index].buffer_ptr = 0;
+ do {
+ INCR_INDEX_BY1(droq->refill_idx,
+ droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ } while (droq->recv_buf_list[droq->refill_idx].
+ buffer);
+ }
+ INCR_INDEX_BY1(refill_index, droq->max_count);
+ } /* while */
+ return desc_refilled;
+}
+
+/* octeon_droq_refill
+ * Parameters:
+ * droq - droq in which descriptors require new buffers.
+ * Description:
+ * Called during normal DROQ processing in interrupt mode or by the poll
+ * thread to refill the descriptors from which buffers were dispatched
+ * to upper layers. Attempts to allocate new buffers. If that fails, moves
+ * up buffers (that were not dispatched) to form a contiguous ring.
+ * Returns:
+ * No of descriptors refilled.
+ * Locks:
+ * This routine is called with droq->lock held.
+ */
+static u32
+octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
+{
+ struct octeon_droq_desc *desc_ring;
+ void *buf = NULL;
+ u8 *data;
+ u32 desc_refilled = 0;
+
+ desc_ring = droq->desc_ring;
+
+ while (droq->refill_count && (desc_refilled < droq->max_count)) {
+		/* If a valid buffer exists (happens if there is no dispatch),
+		 * reuse the buffer, else allocate.
+ */
+ if (!droq->recv_buf_list[droq->refill_idx].buffer) {
+ buf = recv_buffer_alloc(octeon_dev, droq->q_no,
+ droq->buffer_size);
+ /* If a buffer could not be allocated, no point in
+ * continuing
+ */
+ if (!buf)
+ break;
+ droq->recv_buf_list[droq->refill_idx].buffer =
+ buf;
+ data = get_rbd(buf);
+ } else {
+ data = get_rbd(droq->recv_buf_list
+ [droq->refill_idx].buffer);
+ }
+
+ droq->recv_buf_list[droq->refill_idx].data = data;
+
+ desc_ring[droq->refill_idx].buffer_ptr =
+ lio_map_ring(octeon_dev->pci_dev,
+ droq->recv_buf_list[droq->
+ refill_idx].buffer,
+ droq->buffer_size);
+
+ /* Reset any previous values in the length field. */
+ droq->info_list[droq->refill_idx].length = 0;
+
+ INCR_INDEX_BY1(droq->refill_idx, droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ }
+
+ if (droq->refill_count)
+ desc_refilled +=
+ octeon_droq_refill_pullup_descs(droq, desc_ring);
+
+	/* If droq->refill_count is still non-zero here, pass two could not
+	 * reduce it: buffers were only moved up to close the gap in the ring,
+	 * so the same number of buffers still needs to be refilled.
+ */
+ return desc_refilled;
+}
+
+static inline u32
+octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
+{
+ u32 buf_cnt = 0;
+
+ while (total_len > (buf_size * buf_cnt))
+ buf_cnt++;
+ return buf_cnt;
+}
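This is simply DIV_ROUND_UP(total_len, buf_size) computed by linear search; for example, a 5,000-byte packet with 2,048-byte buffers spans ceil(5000/2048) = 3 descriptors.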
+
+static int
+octeon_droq_dispatch_pkt(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ union octeon_rh *rh,
+ struct octeon_droq_info *info)
+{
+ u32 cnt;
+ octeon_dispatch_fn_t disp_fn;
+ struct octeon_recv_info *rinfo;
+
+ cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);
+
+ disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
+ (u16)rh->r.subcode);
+ if (disp_fn) {
+ rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
+ if (rinfo) {
+ struct __dispatch *rdisp = rinfo->rsvd;
+
+ rdisp->rinfo = rinfo;
+ rdisp->disp_fn = disp_fn;
+ rinfo->recv_pkt->rh = *rh;
+ list_add_tail(&rdisp->list,
+ &droq->dispatch_list);
+ } else {
+ droq->stats.dropped_nomem++;
+ }
+ } else {
+ dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n");
+ droq->stats.dropped_nodispatch++;
+ } /* else (dispatch_fn ... */
+
+ return cnt;
+}
+
+static inline void octeon_droq_drop_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 cnt)
+{
+ u32 i = 0, buf_cnt;
+ struct octeon_droq_info *info;
+
+ for (i = 0; i < cnt; i++) {
+ info = &droq->info_list[droq->read_idx];
+ octeon_swap_8B_data((u64 *)info, 2);
+
+ if (info->length) {
+ info->length -= OCT_RH_SIZE;
+ droq->stats.bytes_received += info->length;
+ buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
+ (u32)info->length);
+ } else {
+ dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
+ buf_cnt = 1;
+ }
+
+ INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
+ droq->refill_count += buf_cnt;
+ }
+}
+
+static u32
+octeon_droq_fast_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 pkts_to_process)
+{
+ struct octeon_droq_info *info;
+ union octeon_rh *rh;
+ u32 pkt, total_len = 0, pkt_count;
+
+ pkt_count = pkts_to_process;
+
+ for (pkt = 0; pkt < pkt_count; pkt++) {
+ u32 pkt_len = 0;
+ struct sk_buff *nicbuf = NULL;
+
+ info = &droq->info_list[droq->read_idx];
+ octeon_swap_8B_data((u64 *)info, 2);
+
+ if (!info->length) {
+ dev_err(&oct->pci_dev->dev,
+ "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
+ droq->q_no, droq->read_idx, pkt_count);
+ print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
+ (u8 *)info,
+ OCT_DROQ_INFO_SIZE);
+ break;
+ }
+
+		/* Len of resp hdr is included in the received data len. */
+ info->length -= OCT_RH_SIZE;
+ rh = &info->rh;
+
+ total_len += (u32)info->length;
+
+ if (OPCODE_SLOW_PATH(rh)) {
+ u32 buf_cnt;
+
+ buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
+ INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count);
+ droq->refill_count += buf_cnt;
+ } else {
+ if (info->length <= droq->buffer_size) {
+ lio_unmap_ring(oct->pci_dev,
+ (u64)droq->desc_ring[
+ droq->read_idx].buffer_ptr,
+ droq->buffer_size);
+ pkt_len = (u32)info->length;
+ nicbuf = droq->recv_buf_list[
+ droq->read_idx].buffer;
+ droq->recv_buf_list[droq->read_idx].buffer =
+ NULL;
+ INCR_INDEX_BY1(droq->read_idx, droq->max_count);
+ skb_put(nicbuf, pkt_len);
+ droq->refill_count++;
+ } else {
+ nicbuf = octeon_fast_packet_alloc(oct, droq,
+ droq->q_no,
+ (u32)
+ info->length);
+ pkt_len = 0;
+ /* nicbuf allocation can fail. We'll handle it
+ * inside the loop.
+ */
+ while (pkt_len < info->length) {
+ int cpy_len;
+
+ cpy_len = ((pkt_len +
+ droq->buffer_size) >
+ info->length) ?
+ ((u32)info->length - pkt_len) :
+ droq->buffer_size;
+
+ if (nicbuf) {
+ lio_unmap_ring(oct->pci_dev,
+ (u64)
+ droq->desc_ring
+ [droq->read_idx].
+ buffer_ptr,
+ droq->
+ buffer_size);
+ octeon_fast_packet_next(droq,
+ nicbuf,
+ cpy_len,
+ droq->
+ read_idx
+ );
+ }
+
+ pkt_len += cpy_len;
+ INCR_INDEX_BY1(droq->read_idx,
+ droq->max_count);
+ droq->refill_count++;
+ }
+ }
+
+ if (nicbuf) {
+ if (droq->ops.fptr)
+ droq->ops.fptr(oct->octeon_id,
+ nicbuf, pkt_len,
+ rh, &droq->napi);
+ else
+ recv_buffer_free(nicbuf);
+ }
+ }
+
+ if (droq->refill_count >= droq->refill_threshold) {
+ int desc_refilled = octeon_droq_refill(oct, droq);
+
+ /* Flush the droq descriptor data to memory to be sure
+ * that when we update the credits the data in memory
+ * is accurate.
+ */
+ wmb();
+ writel((desc_refilled), droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ mmiowb();
+ }
+
+ } /* for ( each packet )... */
+
+	/* Update stats with the number of packets and bytes processed. */
+ droq->stats.pkts_received += pkt;
+ droq->stats.bytes_received += total_len;
+
+ if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
+ octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));
+
+ droq->stats.dropped_toomany += (pkts_to_process - pkt);
+ return pkts_to_process;
+ }
+
+ return pkt;
+}
+
+int
+octeon_droq_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 budget)
+{
+ u32 pkt_count = 0, pkts_processed = 0;
+ struct list_head *tmp, *tmp2;
+
+ pkt_count = atomic_read(&droq->pkts_pending);
+ if (!pkt_count)
+ return 0;
+
+ if (pkt_count > budget)
+ pkt_count = budget;
+
+ /* Grab the lock */
+ spin_lock(&droq->lock);
+
+ pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
+
+ atomic_sub(pkts_processed, &droq->pkts_pending);
+
+ /* Release the spin lock */
+ spin_unlock(&droq->lock);
+
+ list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
+ struct __dispatch *rdisp = (struct __dispatch *)tmp;
+
+ list_del(tmp);
+ rdisp->disp_fn(rdisp->rinfo,
+ octeon_get_dispatch_arg
+ (oct,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
+ }
+
+	/* If there are packets pending, schedule the tasklet again */
+ if (atomic_read(&droq->pkts_pending))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Utility function to poll for packets. octeon_droq_check_hw_for_pkts()
+ * must be called before calling this routine.
+ */
+
+static int
+octeon_droq_process_poll_pkts(struct octeon_device *oct,
+ struct octeon_droq *droq, u32 budget)
+{
+ struct list_head *tmp, *tmp2;
+ u32 pkts_available = 0, pkts_processed = 0;
+ u32 total_pkts_processed = 0;
+
+ if (budget > droq->max_count)
+ budget = droq->max_count;
+
+ spin_lock(&droq->lock);
+
+ while (total_pkts_processed < budget) {
+ pkts_available =
+ CVM_MIN((budget - total_pkts_processed),
+ (u32)(atomic_read(&droq->pkts_pending)));
+
+ if (pkts_available == 0)
+ break;
+
+ pkts_processed =
+ octeon_droq_fast_process_packets(oct, droq,
+ pkts_available);
+
+ atomic_sub(pkts_processed, &droq->pkts_pending);
+
+ total_pkts_processed += pkts_processed;
+
+ octeon_droq_check_hw_for_pkts(oct, droq);
+ }
+
+ spin_unlock(&droq->lock);
+
+ list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
+ struct __dispatch *rdisp = (struct __dispatch *)tmp;
+
+ list_del(tmp);
+ rdisp->disp_fn(rdisp->rinfo,
+ octeon_get_dispatch_arg
+ (oct,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
+ }
+
+ return total_pkts_processed;
+}
+
+int
+octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
+ u32 arg)
+{
+ struct octeon_droq *droq;
+ struct octeon_config *oct_cfg = NULL;
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (!oct_cfg)
+ return -EINVAL;
+
+ if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+ __func__, q_no, (oct->num_oqs - 1));
+ return -EINVAL;
+ }
+
+ droq = oct->droq[q_no];
+
+ if (cmd == POLL_EVENT_PROCESS_PKTS)
+ return octeon_droq_process_poll_pkts(oct, droq, arg);
+
+ if (cmd == POLL_EVENT_PENDING_PKTS) {
+ u32 pkt_cnt = atomic_read(&droq->pkts_pending);
+
+ return octeon_droq_process_packets(oct, droq, pkt_cnt);
+ }
+
+ if (cmd == POLL_EVENT_ENABLE_INTR) {
+ u32 value;
+ unsigned long flags;
+
+ /* Enable Pkt Interrupt */
+ switch (oct->chip_id) {
+ case OCTEON_CN66XX:
+ case OCTEON_CN68XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+ spin_lock_irqsave
+ (&cn6xxx->lock_for_droq_int_enb_reg, flags);
+ value =
+ octeon_read_csr(oct,
+ CN6XXX_SLI_PKT_TIME_INT_ENB);
+ value |= (1 << q_no);
+ octeon_write_csr(oct,
+ CN6XXX_SLI_PKT_TIME_INT_ENB,
+ value);
+ value =
+ octeon_read_csr(oct,
+ CN6XXX_SLI_PKT_CNT_INT_ENB);
+ value |= (1 << q_no);
+ octeon_write_csr(oct,
+ CN6XXX_SLI_PKT_CNT_INT_ENB,
+ value);
+
+ /* don't bother flushing the enables */
+
+ spin_unlock_irqrestore
+ (&cn6xxx->lock_for_droq_int_enb_reg, flags);
+ return 0;
+ }
+ break;
+ }
+
+ return 0;
+ }
+
+ dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd);
+ return -EINVAL;
+}
+
+int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
+ struct octeon_droq_ops *ops)
+{
+ struct octeon_droq *droq;
+ unsigned long flags;
+ struct octeon_config *oct_cfg = NULL;
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (!oct_cfg)
+ return -EINVAL;
+
+ if (!(ops)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+ __func__, q_no, (oct->num_oqs - 1));
+ return -EINVAL;
+ }
+
+ droq = oct->droq[q_no];
+
+ spin_lock_irqsave(&droq->lock, flags);
+
+ memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));
+
+ spin_unlock_irqrestore(&droq->lock, flags);
+
+ return 0;
+}
+
+int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
+{
+ unsigned long flags;
+ struct octeon_droq *droq;
+ struct octeon_config *oct_cfg = NULL;
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (!oct_cfg)
+ return -EINVAL;
+
+ if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+ __func__, q_no, oct->num_oqs - 1);
+ return -EINVAL;
+ }
+
+ droq = oct->droq[q_no];
+
+ if (!droq) {
+ dev_info(&oct->pci_dev->dev,
+ "Droq id (%d) not available.\n", q_no);
+ return 0;
+ }
+
+ spin_lock_irqsave(&droq->lock, flags);
+
+ droq->ops.fptr = NULL;
+ droq->ops.drop_on_max = 0;
+
+ spin_unlock_irqrestore(&droq->lock, flags);
+
+ return 0;
+}
+
+int octeon_create_droq(struct octeon_device *oct,
+ u32 q_no, u32 num_descs,
+ u32 desc_size, void *app_ctx)
+{
+ struct octeon_droq *droq;
+
+ if (oct->droq[q_no]) {
+ dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
+ q_no);
+ return 1;
+ }
+
+ /* Allocate the DS for the new droq. */
+ droq = vmalloc(sizeof(*droq));
+ if (!droq)
+ goto create_droq_fail;
+ memset(droq, 0, sizeof(struct octeon_droq));
+
+	/* Disable the pkt o/p for this Q */
+ octeon_set_droq_pkt_op(oct, q_no, 0);
+ oct->droq[q_no] = droq;
+
+ /* Initialize the Droq */
+ octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx);
+
+ oct->num_oqs++;
+
+ dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
+ oct->num_oqs);
+
+ /* Global Droq register settings */
+
+	/* As of now not required, as settings are done for all 32 Droqs at
+ * the same time.
+ */
+ return 0;
+
+create_droq_fail:
+ octeon_delete_droq(oct, q_no);
+ return -1;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
new file mode 100644
index 000000000000..7940ccee12d9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -0,0 +1,426 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_droq.h
+ * \brief Implementation of Octeon Output queues. "Output" is with
+ * respect to the Octeon device on the NIC. From this driver's point of
+ * view they are ingress queues.
+ */
+
+#ifndef __OCTEON_DROQ_H__
+#define __OCTEON_DROQ_H__
+
+/* Default packet budget: effectively no limit on packets per iteration. */
+#define MAX_PACKET_BUDGET 0xFFFFFFFF
+
+/** Octeon descriptor format.
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ * -# Physical (bus) address of the data buffer.
+ * -# Physical (bus) address of an octeon_droq_info structure.
+ * The Octeon device DMA's incoming packets and its information at the address
+ * given by these descriptor fields.
+ */
+struct octeon_droq_desc {
+ /** The buffer pointer */
+ u64 buffer_ptr;
+
+ /** The Info pointer */
+ u64 info_ptr;
+};
+
+#define OCT_DROQ_DESC_SIZE (sizeof(struct octeon_droq_desc))
+
+/** Information about packet DMA'ed by Octeon.
+ * The format of the information available at Info Pointer after Octeon
+ * has posted a packet. Not all descriptors have valid information. Only
+ * the Info field of the first descriptor for a packet has information
+ * about the packet.
+ */
+struct octeon_droq_info {
+ /** The Output Receive Header. */
+ union octeon_rh rh;
+
+ /** The Length of the packet. */
+ u64 length;
+};
+
+#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info))
+
+/** Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers.
+*/
+struct octeon_recv_buffer {
+ /** Packet buffer, including metadata. */
+ void *buffer;
+
+ /** Data in the packet buffer. */
+ u8 *data;
+};
+
+#define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer))
+
+/** Output Queue statistics. Each output queue maintains its own set of these stats. */
+struct oct_droq_stats {
+ /** Number of packets received in this queue. */
+ u64 pkts_received;
+
+ /** Bytes received by this queue. */
+ u64 bytes_received;
+
+ /** Packets dropped due to no dispatch function. */
+ u64 dropped_nodispatch;
+
+ /** Packets dropped due to no memory available. */
+ u64 dropped_nomem;
+
+ /** Packets dropped due to large number of pkts to process. */
+ u64 dropped_toomany;
+
+ /** Number of packets sent to stack from this queue. */
+ u64 rx_pkts_received;
+
+ /** Number of Bytes sent to stack from this queue. */
+ u64 rx_bytes_received;
+
+ /** Num of Packets dropped due to receive path failures. */
+ u64 rx_dropped;
+};
+
+#define POLL_EVENT_INTR_ARRIVED 1
+#define POLL_EVENT_PROCESS_PKTS 2
+#define POLL_EVENT_PENDING_PKTS 3
+#define POLL_EVENT_ENABLE_INTR 4
+
+/* The maximum number of buffers that can be dispatched from the
+ * output/dma queue. Set to 64 assuming 1K buffers in DROQ and the fact that
+ * max packet size from DROQ is 64K.
+ */
+#define MAX_RECV_BUFS 64
+
+/** Receive Packet format used when dispatching output queue packets
+ * with non-raw opcodes.
+ * The received packet will be sent to the upper layers using this
+ * structure which is passed as a parameter to the dispatch function
+ */
+struct octeon_recv_pkt {
+ /** Number of buffers in this received packet */
+ u16 buffer_count;
+
+ /** Id of the device that is sending the packet up */
+ u16 octeon_id;
+
+ /** Length of data in the packet buffer */
+ u32 length;
+
+ /** The receive header */
+ union octeon_rh rh;
+
+ /** Pointer to the OS-specific packet buffer */
+ void *buffer_ptr[MAX_RECV_BUFS];
+
+ /** Size of the buffers pointed to by ptr's in buffer_ptr */
+ u32 buffer_size[MAX_RECV_BUFS];
+};
+
+#define OCT_RECV_PKT_SIZE (sizeof(struct octeon_recv_pkt))
+
+/** The first parameter of a dispatch function.
+ * For a raw mode opcode, the driver dispatches with the device
+ * pointer in this structure.
+ * For non-raw mode opcode, the driver dispatches the recv_pkt
+ * created to contain the buffers with data received from Octeon.
+ * ---------------------
+ * | *recv_pkt ----|---
+ * |-------------------| |
+ * | 0 or more bytes | |
+ * | reserved by driver| |
+ * |-------------------|<-/
+ * | octeon_recv_pkt |
+ * | |
+ * |___________________|
+ */
+struct octeon_recv_info {
+ void *rsvd;
+ struct octeon_recv_pkt *recv_pkt;
+};
+
+#define OCT_RECV_INFO_SIZE (sizeof(struct octeon_recv_info))
+
+/** Allocate a recv_info structure. The recv_pkt pointer in the recv_info
+ * structure is filled in before this call returns.
+ * @param extra_bytes - extra bytes to be allocated at the end of the recv info
+ * structure.
+ * @return - pointer to a newly allocated recv_info structure.
+ */
+static inline struct octeon_recv_info *octeon_alloc_recv_info(int extra_bytes)
+{
+ struct octeon_recv_info *recv_info;
+ u8 *buf;
+
+ buf = kmalloc(OCT_RECV_PKT_SIZE + OCT_RECV_INFO_SIZE +
+ extra_bytes, GFP_ATOMIC);
+ if (!buf)
+ return NULL;
+
+ recv_info = (struct octeon_recv_info *)buf;
+ recv_info->recv_pkt =
+ (struct octeon_recv_pkt *)(buf + OCT_RECV_INFO_SIZE);
+ recv_info->rsvd = NULL;
+ if (extra_bytes)
+ recv_info->rsvd = buf + OCT_RECV_INFO_SIZE + OCT_RECV_PKT_SIZE;
+
+ return recv_info;
+}
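A short usage sketch mirroring what octeon_create_recv_info() in octeon_droq.c does; struct my_priv is a placeholder, and note the allocator above lays the reserved area out after recv_pkt:

/* Illustrative only: reserve room for private per-dispatch state. */
struct my_priv {
	int tag;
};

static struct octeon_recv_info *example_alloc_recv_info(void)
{
	struct octeon_recv_info *ri;

	ri = octeon_alloc_recv_info(sizeof(struct my_priv));
	if (ri)
		((struct my_priv *)ri->rsvd)->tag = 42;

	return ri;	/* released later with octeon_free_recv_info() */
}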
+
+/** Free a recv_info structure.
+ * @param recv_info - Pointer to receive_info to be freed
+ */
+static inline void octeon_free_recv_info(struct octeon_recv_info *recv_info)
+{
+ kfree(recv_info);
+}
+
+typedef int (*octeon_dispatch_fn_t)(struct octeon_recv_info *, void *);
+
+/** Used by NIC module to register packet handler and to get device
+ * information for each octeon device.
+ */
+struct octeon_droq_ops {
+ /** This registered function will be called by the driver with
+ * the octeon id, pointer to buffer from droq and length of
+ * data in the buffer. The receive header gives the port
+ * number to the caller. Function pointer is set by caller.
+ */
+ void (*fptr)(u32, void *, u32, union octeon_rh *, void *);
+
+	/* This function will be called by the driver for all NAPI related
+	 * events. It is passed a single opaque argument supplied by the
+	 * driver when the callback is invoked.
+ */
+ void (*napi_fn)(void *);
+
+ u32 poll_mode;
+
+ /** Flag indicating if the DROQ handler should drop packets that
+ * it cannot handle in one iteration. Set by caller.
+ */
+ u32 drop_on_max;
+};
+
+/** The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement a
+ * Octeon DROQ.
+ */
+struct octeon_droq {
+ /** A spinlock to protect access to this ring. */
+ spinlock_t lock;
+
+ u32 q_no;
+
+ struct octeon_droq_ops ops;
+
+ struct octeon_device *oct_dev;
+
+ /** The 8B aligned descriptor ring starts at this address. */
+ struct octeon_droq_desc *desc_ring;
+
+ /** Index in the ring where the driver should read the next packet */
+ u32 read_idx;
+
+ /** Index in the ring where Octeon will write the next packet */
+ u32 write_idx;
+
+ /** Index in the ring where the driver will refill the descriptor's
+ * buffer
+ */
+ u32 refill_idx;
+
+ /** Packets pending to be processed */
+ atomic_t pkts_pending;
+
+ /** Number of descriptors in this ring. */
+ u32 max_count;
+
+ /** The number of descriptors pending refill. */
+ u32 refill_count;
+
+ u32 pkts_per_intr;
+ u32 refill_threshold;
+
+ /** The max number of descriptors in DROQ without a buffer.
+ * This field is used to keep track of empty space threshold. If the
+ * refill_count reaches this value, the DROQ cannot accept a max-sized
+ * (64K) packet.
+ */
+ u32 max_empty_descs;
+
+ /** The 8B aligned info ptrs begin from this address. */
+ struct octeon_droq_info *info_list;
+
+ /** The receive buffer list. This list has the virtual addresses of the
+ * buffers.
+ */
+ struct octeon_recv_buffer *recv_buf_list;
+
+ /** The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+
+ /** Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ void __iomem *pkts_credit_reg;
+
+ /** Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ void __iomem *pkts_sent_reg;
+
+ struct list_head dispatch_list;
+
+ /** Statistics for this DROQ. */
+ struct oct_droq_stats stats;
+
+ /** DMA mapped address of the DROQ descriptor ring. */
+ size_t desc_ring_dma;
+
+ /** Info ptr list are allocated at this virtual address. */
+ size_t info_base_addr;
+
+ /** DMA mapped address of the info list */
+ size_t info_list_dma;
+
+ /** Allocated size of info list. */
+ u32 info_alloc_size;
+
+ /** application context */
+ void *app_ctx;
+
+ struct napi_struct napi;
+
+ u32 cpu_id;
+
+ struct call_single_data csd;
+};
+
+#define OCT_DROQ_SIZE (sizeof(struct octeon_droq))
+
+/**
+ * Allocates space for the descriptor ring for the droq and sets the
+ * base addr, num desc etc in Octeon registers.
+ *
+ * @param oct_dev - pointer to the octeon device structure
+ * @param q_no - droq no. ranges from 0 - 3.
+ * @param app_ctx - pointer to application context
+ * @return Success: 0 Failure: 1
+*/
+int octeon_init_droq(struct octeon_device *oct_dev,
+ u32 q_no,
+ u32 num_descs,
+ u32 desc_size,
+ void *app_ctx);
+
+/**
+ * Frees the space for descriptor ring for the droq.
+ *
+ * @param oct_dev - pointer to the octeon device structure
+ * @param q_no - droq no. ranges from 0 - 3.
+ * @return: Success: 0 Failure: 1
+*/
+int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
+
+/** Register a change in droq operations. The ops field has a pointer to a
+ * function which will be called by the DROQ handler for all packets arriving
+ * on output queues given by q_no irrespective of the type of packet.
+ * The ops field also has a flag which if set tells the DROQ handler to
+ * drop packets if it receives more than what it can process in one
+ * invocation of the handler.
+ * @param oct - octeon device
+ * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1)
+ * @param ops - the droq_ops settings for this queue
+ * @return - 0 on success, -ENODEV or -EINVAL on error.
+ */
+int
+octeon_register_droq_ops(struct octeon_device *oct,
+ u32 q_no,
+ struct octeon_droq_ops *ops);
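A hedged caller sketch: zero a struct octeon_droq_ops, fill in the receive callback, and register it for queue 0. my_rx_handler is a placeholder matching the fptr signature in struct octeon_droq_ops above.

/* Illustrative receive callback; a real one would hand buf to the
 * stack or free it via recv_buffer_free().
 */
static void my_rx_handler(u32 octeon_id, void *buf, u32 len,
			  union octeon_rh *rh, void *napi)
{
}

static int example_register_rx(struct octeon_device *oct)
{
	struct octeon_droq_ops ops;

	memset(&ops, 0, sizeof(ops));
	ops.fptr = my_rx_handler;
	ops.drop_on_max = 1;	/* drop what one invocation cannot handle */

	return octeon_register_droq_ops(oct, 0, &ops);
}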
+
+/** Resets the function pointer and flag settings made by
+ * octeon_register_droq_ops(). After this routine is called, the DROQ handler
+ * will look up the dispatch function for each arriving packet on the output queue
+ * given by q_no.
+ * @param oct - octeon device
+ * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1)
+ * @return - 0 on success, -ENODEV or -EINVAL on error.
+ */
+int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no);
+
+/** Register a dispatch function for an opcode/subcode. The driver will call
+ * this dispatch function when it receives a packet with the given
+ * opcode/subcode in its output queues along with the user specified
+ * argument.
+ * @param oct - the octeon device to register with.
+ * @param opcode - the opcode for which the dispatch will be registered.
+ * @param subcode - the subcode for which the dispatch will be registered
+ * @param fn - the dispatch function.
+ * @param fn_arg - user-specified argument that will be passed to the
+ * dispatch function by the driver.
+ * @return Success: 0; Failure: 1
+ */
+int octeon_register_dispatch_fn(struct octeon_device *oct,
+ u16 opcode,
+ u16 subcode,
+ octeon_dispatch_fn_t fn, void *fn_arg);
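And a sketch of slow-path dispatch registration; the opcode/subcode pair 0x10/0x01 is an arbitrary placeholder, not a constant defined by this patch.

/* Illustrative dispatch handler: consume the packet, free the info. */
static int my_msg_handler(struct octeon_recv_info *rinfo, void *arg)
{
	struct octeon_device *oct = arg;

	dev_dbg(&oct->pci_dev->dev, "dispatched pkt, %u bytes\n",
		rinfo->recv_pkt->length);
	octeon_free_recv_info(rinfo);
	return 0;
}

/* at init time: octeon_register_dispatch_fn(oct, 0x10, 0x01,
 *                                           my_msg_handler, oct);
 */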
+
+/** Remove registration for an opcode/subcode. This will delete the mapping for
+ * an opcode/subcode. The dispatch function will be unregistered and will no
+ * longer be called if a packet with the opcode/subcode arrives in the driver
+ * output queues.
+ * @param oct - the octeon device to unregister from.
+ * @param opcode - the opcode to be unregistered.
+ * @param subcode - the subcode to be unregistered.
+ *
+ * @return Success: 0; Failure: 1
+ */
+int octeon_unregister_dispatch_fn(struct octeon_device *oct,
+ u16 opcode,
+ u16 subcode);
+
+void octeon_droq_print_stats(void);
+
+u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
+ struct octeon_droq *droq);
+
+int octeon_create_droq(struct octeon_device *oct, u32 q_no,
+ u32 num_descs, u32 desc_size, void *app_ctx);
+
+int octeon_droq_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 budget);
+
+int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no,
+ int cmd, u32 arg);
+
+#endif /*__OCTEON_DROQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
new file mode 100644
index 000000000000..592fe49b589d
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -0,0 +1,319 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_iq.h
+ * \brief Host Driver: Implementation of Octeon input queues. "Input" is
+ * with respect to the Octeon device on the NIC. From this driver's
+ * point of view they are egress queues.
+ */
+
+#ifndef __OCTEON_IQ_H__
+#define __OCTEON_IQ_H__
+
+#define IQ_STATUS_RUNNING 1
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+/*------------------------- INSTRUCTION QUEUE --------------------------*/
+
+/* \cond */
+
+#define REQTYPE_NONE 0
+#define REQTYPE_NORESP_NET 1
+#define REQTYPE_NORESP_NET_SG 2
+#define REQTYPE_RESP_NET 3
+#define REQTYPE_RESP_NET_SG 4
+#define REQTYPE_SOFT_COMMAND 5
+#define REQTYPE_LAST 5
+
+struct octeon_request_list {
+ u32 reqtype;
+ void *buf;
+};
+
+/* \endcond */
+
+/** Input Queue statistics. Each input queue has a set of statistics fields. */
+struct oct_iq_stats {
+ u64 instr_posted; /**< Instructions posted to this queue. */
+ u64 instr_processed; /**< Instructions processed in this queue. */
+ u64 instr_dropped; /**< Instructions that could not be processed. */
+ u64 bytes_sent; /**< Bytes sent through this queue. */
+ u64 sgentry_sent; /**< Gather entries sent through this queue. */
+ u64 tx_done; /**< Num of packets sent to network. */
+ u64 tx_iq_busy; /**< Num of times this iq was found to be full. */
+ u64 tx_dropped; /**< Num of pkts dropped due to xmit path errors. */
+ u64 tx_tot_bytes; /**< Total count of bytes sent to network. */
+};
+
+#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats))
+
+/** The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4) for
+ * an Octeon device has one such structure to represent it.
+*/
+struct octeon_instr_queue {
+ /** A spinlock to protect access to the input ring. */
+ spinlock_t lock;
+
+ /** Flag that indicates if the queue uses 64 byte commands. */
+ u32 iqcmd_64B:1;
+
+ /** Queue Number. */
+ u32 iq_no:5;
+
+ u32 rsvd:17;
+
+ /* Controls the periodic flushing of iq */
+ u32 do_auto_flush:1;
+
+ u32 status:8;
+
+ /** Maximum no. of instructions in this queue. */
+ u32 max_count;
+
+ /** Index in input ring where the driver should write the next packet */
+ u32 host_write_index;
+
+ /** Index in input ring where Octeon is expected to read the next
+ * packet.
+ */
+ u32 octeon_read_index;
+
+ /** This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u32 flush_index;
+
+ /** This field keeps track of the instructions pending in this queue. */
+ atomic_t instr_pending;
+
+ u32 reset_instr_cnt;
+
+ /** Pointer to the Virtual Base addr of the input ring. */
+ u8 *base_addr;
+
+ struct octeon_request_list *request_list;
+
+ /** Octeon doorbell register for the ring. */
+ void __iomem *doorbell_reg;
+
+ /** Octeon instruction count register for this ring. */
+ void __iomem *inst_cnt_reg;
+
+ /** Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /** The max. number of instructions that can be held pending by the
+ * driver.
+ */
+ u32 fill_threshold;
+
+ /** The last time that the doorbell was rung. */
+ u64 last_db_time;
+
+ /** The doorbell timeout. If the doorbell was not rung for this time and
+ * fill_cnt is non-zero, ring the doorbell again.
+ */
+ u32 db_timeout;
+
+ /** Statistics for this input queue. */
+ struct oct_iq_stats stats;
+
+ /** DMA mapped base address of the input descriptor ring. */
+ u64 base_addr_dma;
+
+ /** Application context */
+ void *app_ctx;
+};
+
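The three indices above (host_write_index, octeon_read_index, flush_index) chase each other around a ring whose size is a power of two, which is why octeon_init_instr_queue() later rejects non-power-of-two descriptor counts. A standalone demo of the wraparound arithmetic, runnable as plain C:

	#include <stdio.h>

	/* Distance from 'from' to 'to' on a ring whose size is a power of
	 * two; the mask makes the subtraction wrap correctly. */
	static unsigned int ring_distance(unsigned int from, unsigned int to,
					  unsigned int max_count)
	{
		return (to - from) & (max_count - 1);
	}

	int main(void)
	{
		/* write index has wrapped past the read index */
		printf("%u\n", ring_distance(6, 2, 8));	/* prints 4 */
		return 0;
	}
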
+/*---------------------- INSTRUCTION FORMAT ----------------------------*/
+
+/** 32-byte instruction format.
+ * Format of instruction for a 32-byte mode input queue.
+ */
+struct octeon_instr_32B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ /** Input Request Header. Additional info about the input. */
+ u64 irh;
+
+};
+
+#define OCT_32B_INSTR_SIZE (sizeof(struct octeon_instr_32B))
+
+/** 64-byte instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ */
+struct octeon_instr_64B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih;
+
+ /** Input Request Header. */
+ u64 irh;
+
+ /** opcode/subcode specific parameters */
+ u64 ossp[2];
+
+ /** Return Data Parameters */
+ u64 rdp;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ u64 reserved;
+
+};
+
+#define OCT_64B_INSTR_SIZE (sizeof(struct octeon_instr_64B))
+
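Since the hardware fetches fixed-size commands, these formats must be exactly four and eight u64 words (32 and 64 bytes). A compile-time check along these lines could guard against padding creeping in; this is an illustrative sketch, not part of the patch, using the kernel's BUILD_BUG_ON from linux/bug.h:

	/* Sketch: fail the build if either format ever changes size. */
	static inline void octeon_instr_size_check(void)
	{
		BUILD_BUG_ON(sizeof(struct octeon_instr_32B) != 32);
		BUILD_BUG_ON(sizeof(struct octeon_instr_64B) != 64);
	}
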
+/** The size of each buffer in soft command buffer pool
+ */
+#define SOFT_COMMAND_BUFFER_SIZE 1024
+
+struct octeon_soft_command {
+ /** Soft command buffer info. */
+ struct list_head node;
+ u64 dma_addr;
+ u32 size;
+
+ /** Command and return status */
+ struct octeon_instr_64B cmd;
+#define COMPLETION_WORD_INIT 0xffffffffffffffffULL
+ u64 *status_word;
+
+ /** Data buffer info */
+ void *virtdptr;
+ u64 dmadptr;
+ u32 datasize;
+
+ /** Return buffer info */
+ void *virtrptr;
+ u64 dmarptr;
+ u32 rdatasize;
+
+ /** Context buffer info */
+ void *ctxptr;
+ u32 ctxsize;
+
+ /** Time out and callback */
+ size_t wait_time;
+ size_t timeout;
+ u32 iq_no;
+ void (*callback)(struct octeon_device *, u32, void *);
+ void *callback_arg;
+};
+
+/** Maximum number of buffers to allocate into soft command buffer pool
+ */
+#define MAX_SOFT_COMMAND_BUFFERS 16
+
+/** Head of a soft command buffer pool.
+ */
+struct octeon_sc_buffer_pool {
+ /** List structure to add/delete pending entries to. */
+ struct list_head head;
+
+ /** A lock for this response list */
+ spinlock_t lock;
+
+ atomic_t alloc_buf_count;
+};
+
+int octeon_setup_sc_buffer_pool(struct octeon_device *oct);
+int octeon_free_sc_buffer_pool(struct octeon_device *oct);
+struct octeon_soft_command *
+ octeon_alloc_soft_command(struct octeon_device *oct,
+ u32 datasize, u32 rdatasize,
+ u32 ctxsize);
+void octeon_free_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc);
+
+/**
+ * octeon_init_instr_queue()
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param iq_no - queue to be initialized (0 <= iq_no <= 3).
+ *
+ * Called at driver init time for each input queue. The configuration
+ * parameters for the queue are taken from the per-chip configuration.
+ *
+ * @return Success: 0; Failure: 1
+ */
+int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no,
+ u32 num_descs);
+
+/**
+ * octeon_delete_instr_queue()
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param iq_no - queue to be deleted (0 <= iq_no <= 3).
+ *
+ * Called at driver unload time for each input queue. Deletes all
+ * allocated resources for the input queue.
+ *
+ * @return Success: 0; Failure: 1
+ */
+int octeon_delete_instr_queue(struct octeon_device *octeon_dev, u32 iq_no);
+
+int lio_wait_for_instr_fetch(struct octeon_device *oct);
+
+int
+octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
+ void (*fn)(void *));
+
+int
+lio_process_iq_request_list(struct octeon_device *oct,
+ struct octeon_instr_queue *iq);
+
+int octeon_send_command(struct octeon_device *oct, u32 iq_no,
+ u32 force_db, void *cmd, void *buf,
+ u32 datasize, u32 reqtype);
+
+void octeon_prepare_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc,
+ u8 opcode, u8 subcode,
+ u32 irh_ossp, u64 ossp0,
+ u64 ossp1);
+
+int octeon_send_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc);
+
+int octeon_setup_iq(struct octeon_device *oct, u32 iq_no,
+ u32 num_descs, void *app_ctx);
+
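Tying the declarations above together, a hypothetical sketch of the soft-command round trip: allocate from the pool, prepare, send, and free on failure. The opcode/subcode values and sizes are placeholders; on success the command is freed later by the response-handling path:

	/* Hypothetical sketch; 0x1/0x2 and the sizes are placeholders. */
	static int example_send_soft_cmd(struct octeon_device *oct)
	{
		struct octeon_soft_command *sc;

		sc = octeon_alloc_soft_command(oct, 16 /* datasize */,
					       16 /* rdatasize */,
					       0 /* ctxsize */);
		if (!sc)
			return -ENOMEM;

		/* fill sc->virtdptr with the command payload here */
		octeon_prepare_soft_command(oct, sc, 0x1, 0x2, 0, 0, 0);

		if (octeon_send_soft_command(oct, sc)) {
			octeon_free_soft_command(oct, sc);
			return -EIO;
		}
		return 0;
	}
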
+#endif /* __OCTEON_IQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
new file mode 100644
index 000000000000..cbd081981180
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -0,0 +1,237 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_main.h
+ * \brief Host Driver: This file is included by all host driver source files
+ * to include common definitions.
+ */
+
+#ifndef _OCTEON_MAIN_H_
+#define _OCTEON_MAIN_H_
+
+#if BITS_PER_LONG == 32
+#define CVM_CAST64(v) ((long long)(v))
+#elif BITS_PER_LONG == 64
+#define CVM_CAST64(v) ((long long)(long)(v))
+#else
+#error "Unknown system architecture"
+#endif
+
+#define DRV_NAME "LiquidIO"
+
+/**
+ * \brief determines if a given console has debug enabled.
+ * @param console console to check
+ * @returns 1 = enabled. 0 otherwise
+ */
+int octeon_console_debug_enabled(u32 console);
+
+/* BQL-related functions */
+void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl);
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+ unsigned int bytes_compl);
+
+/** Swap 8B blocks */
+static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
+{
+ while (blocks) {
+ cpu_to_be64s(data);
+ blocks--;
+ data++;
+ }
+}
+
+/**
+ * \brief unmaps a PCI BAR
+ * @param oct Pointer to Octeon device
+ * @param baridx bar index
+ */
+static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
+{
+ dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
+ baridx);
+
+ if (oct->mmio[baridx].done)
+ iounmap(oct->mmio[baridx].hw_addr);
+
+ if (oct->mmio[baridx].start)
+ pci_release_region(oct->pci_dev, baridx * 2);
+}
+
+/**
+ * \brief maps a PCI BAR
+ * @param oct Pointer to Octeon device
+ * @param baridx bar index
+ * @param max_map_len maximum length of mapped memory
+ */
+static inline int octeon_map_pci_barx(struct octeon_device *oct,
+ int baridx, int max_map_len)
+{
+ u32 mapped_len = 0;
+
+ if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
+ dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
+ baridx);
+ return 1;
+ }
+
+ oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
+ oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);
+
+ mapped_len = oct->mmio[baridx].len;
+ if (!mapped_len)
+ return 1;
+
+ if (max_map_len && (mapped_len > max_map_len))
+ mapped_len = max_map_len;
+
+ oct->mmio[baridx].hw_addr =
+ ioremap(oct->mmio[baridx].start, mapped_len);
+ oct->mmio[baridx].mapped_len = mapped_len;
+
+ dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
+ baridx, oct->mmio[baridx].start, mapped_len,
+ oct->mmio[baridx].len);
+
+ if (!oct->mmio[baridx].hw_addr) {
+ dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
+ baridx);
+ return 1;
+ }
+ oct->mmio[baridx].done = 1;
+
+ return 0;
+}
+
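A hypothetical probe-time sketch of how this pair would be used, unwinding BAR 0 when BAR 1 fails; passing 0 for max_map_len maps the full BAR:

	/* Hypothetical sketch: map both BARs, undoing the first on failure. */
	static inline int example_map_bars(struct octeon_device *oct)
	{
		if (octeon_map_pci_barx(oct, 0, 0))
			return -ENODEV;

		if (octeon_map_pci_barx(oct, 1, 0)) {
			octeon_unmap_pci_barx(oct, 0);
			return -ENODEV;
		}

		return 0;
	}
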
+static inline void *
+cnnic_alloc_aligned_dma(struct pci_dev *pci_dev,
+ u32 size,
+ u32 *alloc_size,
+ size_t *orig_ptr,
+ size_t *dma_addr __attribute__((unused)))
+{
+ int retries = 0;
+ void *ptr = NULL;
+
+#define OCTEON_MAX_ALLOC_RETRIES 1
+ do {
+ ptr =
+ (void *)__get_free_pages(GFP_KERNEL,
+ get_order(size));
+ if ((unsigned long)ptr & 0x07) {
+ free_pages((unsigned long)ptr, get_order(size));
+ ptr = NULL;
+ /* Increment the size required if the first
+ * attempt failed.
+ */
+ if (!retries)
+ size += 7;
+ }
+ retries++;
+ } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
+
+ *alloc_size = size;
+ *orig_ptr = (unsigned long)ptr;
+ if ((unsigned long)ptr & 0x07)
+ ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
+ return ptr;
+}
+
+#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
+ free_pages(orig_ptr, get_order(size))
+
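The retry loop above over-allocates by 7 bytes and then rounds the pointer up; a standalone demonstration of that round-up arithmetic:

	#include <stdio.h>

	/* (p + 7) & ~7UL bumps an address to the next 8-byte boundary while
	 * leaving already-aligned addresses untouched. */
	int main(void)
	{
		unsigned long addrs[] = { 0x1000, 0x1001, 0x1007, 0x1008 };
		int i;

		for (i = 0; i < 4; i++)
			printf("0x%lx -> 0x%lx\n", addrs[i],
			       (addrs[i] + 7) & ~7UL);
		/* 0x1000->0x1000, 0x1001->0x1008,
		 * 0x1007->0x1008, 0x1008->0x1008 */
		return 0;
	}
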
+static inline void
+sleep_cond(wait_queue_head_t *wait_queue, int *condition)
+{
+ wait_queue_t we;
+
+ init_waitqueue_entry(&we, current);
+ add_wait_queue(wait_queue, &we);
+ while (!(ACCESS_ONCE(*condition))) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ goto out;
+ schedule();
+ }
+out:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(wait_queue, &we);
+}
+
+static inline void
+sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond)
+{
+ wait_queue_t we;
+
+ init_waitqueue_entry(&we, current);
+ add_wait_queue(waitq, &we);
+ while (!atomic_read(pcond)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ goto out;
+ schedule();
+ }
+out:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(waitq, &we);
+}
+
+/* Gives up the CPU for a timeout period.
+ * Check that the condition is not true before we go to sleep for a
+ * timeout period.
+ */
+static inline void
+sleep_timeout_cond(wait_queue_head_t *wait_queue,
+ int *condition,
+ int timeout)
+{
+ wait_queue_t we;
+
+ init_waitqueue_entry(&we, current);
+ add_wait_queue(wait_queue, &we);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!(*condition))
+ schedule_timeout(timeout);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(wait_queue, &we);
+}
+
+#ifndef ROUNDUP4
+#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
+#endif
+
+#ifndef ROUNDUP8
+#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
+#endif
+
+#ifndef ROUNDUP16
+#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
+#endif
+
+#ifndef ROUNDUP128
+#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
+#endif
+
+#endif /* _OCTEON_MAIN_H_ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
new file mode 100644
index 000000000000..5aecef870377
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -0,0 +1,199 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+#define MEMOPS_IDX MAX_BAR1_MAP_INDEX
+
+static inline void
+octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)),
+ u32 idx __attribute__((unused)))
+{
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 mask;
+
+ mask = oct->fn_list.bar1_idx_read(oct, idx);
+ mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
+ oct->fn_list.bar1_idx_write(oct, idx, mask);
+#endif
+}
+
+static void
+octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
+ u8 *hostbuf, u32 len)
+{
+ while ((len) && ((unsigned long)mapped_addr) & 7) {
+ writeb(*(hostbuf++), mapped_addr++);
+ len--;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len >= 8) {
+ writeq(*((u64 *)hostbuf), mapped_addr);
+ mapped_addr += 8;
+ hostbuf += 8;
+ len -= 8;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len--)
+ writeb(*(hostbuf++), mapped_addr++);
+}
+
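The fast read/write pair copies byte-by-byte until mapped_addr is 8-byte aligned, moves the bulk as 64-bit accesses (with the swap mode toggled around them on big-endian hosts), then finishes byte-by-byte. The split arithmetic, as a standalone demo:

	#include <stdio.h>

	/* Head/body/tail split used by octeon_pci_fastwrite()/fastread(). */
	int main(void)
	{
		unsigned long addr = 0x1003;
		unsigned int len = 29, head = 0, body, tail;

		while ((addr + head) & 7)
			head++;                 /* 5 bytes to reach 0x1008 */
		body = ((len - head) / 8) * 8;  /* 24 bytes as three writeq()s */
		tail = len - head - body;       /* 0 bytes left over */
		printf("head=%u body=%u tail=%u\n", head, body, tail);
		return 0;
	}
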
+static void
+octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
+ u8 *hostbuf, u32 len)
+{
+ while ((len) && ((unsigned long)mapped_addr) & 7) {
+ *(hostbuf++) = readb(mapped_addr++);
+ len--;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len >= 8) {
+ *((u64 *)hostbuf) = readq(mapped_addr);
+ mapped_addr += 8;
+ hostbuf += 8;
+ len -= 8;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len--)
+ *(hostbuf++) = readb(mapped_addr++);
+}
+
+/* Core mem read/write with temporary bar1 settings. */
+/* op = 1 to read, op = 0 to write. */
+static void
+__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
+ u8 *hostbuf, u32 len, u32 op)
+{
+ u32 copy_len = 0, index_reg_val = 0;
+ unsigned long flags;
+ u8 __iomem *mapped_addr;
+
+ spin_lock_irqsave(&oct->mem_access_lock, flags);
+
+ /* Save the original index reg value. */
+ index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
+ do {
+ oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
+ mapped_addr = oct->mmio[1].hw_addr
+ + (MEMOPS_IDX << 22) + (addr & 0x3fffff);
+
+ /* If the operation crosses a 4MB boundary, split the transfer
+ * at the 4MB boundary.
+ */
+ if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
+ copy_len = (u32)(((addr & ~(0x3fffff)) +
+ (MEMOPS_IDX << 22)) - addr);
+ } else {
+ copy_len = len;
+ }
+
+ if (op) { /* read from core */
+ octeon_pci_fastread(oct, mapped_addr, hostbuf,
+ copy_len);
+ } else {
+ octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
+ copy_len);
+ }
+
+ len -= copy_len;
+ addr += copy_len;
+ hostbuf += copy_len;
+
+ } while (len);
+
+ oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);
+
+ spin_unlock_irqrestore(&oct->mem_access_lock, flags);
+}
+
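The boundary test above compares the 4MB-aligned regions of the first and last byte of the transfer; a standalone demo of that check:

	#include <stdio.h>

	/* A transfer crosses a 4MB window boundary when its first and last
	 * bytes fall in different 4MB-aligned regions. */
	int main(void)
	{
		unsigned long long addr = 0x3ffff0ULL;
		unsigned int len = 0x20;
		int crosses = ((addr + len - 1) & ~0x3fffffULL) !=
			      (addr & ~0x3fffffULL);

		printf("crosses=%d\n", crosses); /* prints 1 */
		return 0;
	}
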
+void
+octeon_pci_read_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len)
+{
+ __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
+}
+
+void
+octeon_pci_write_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len)
+{
+ __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 0);
+}
+
+u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
+{
+ __be64 ret;
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);
+
+ return be64_to_cpu(ret);
+}
+
+u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
+{
+ __be32 ret;
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);
+
+ return be32_to_cpu(ret);
+}
+
+void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
+ u32 val)
+{
+ __be32 t = cpu_to_be32(val);
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
new file mode 100644
index 000000000000..11b183377b44
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
@@ -0,0 +1,75 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_mem_ops.h
+ * \brief Host Driver: Routines used to read/write Octeon memory.
+ */
+
+#ifndef __OCTEON_MEM_OPS_H__
+#define __OCTEON_MEM_OPS_H__
+
+/** Read a 64-bit value from a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to read from.
+ *
+ * The routine programs a BAR1 index register to map the range of
+ * addresses in which core_addr falls.
+ *
+ * @return 64-bit value read from Core memory
+ */
+u64 octeon_read_device_mem64(struct octeon_device *oct, u64 core_addr);
+
+/** Read a 32-bit value from a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to read from.
+ *
+ * @return 32-bit value read from Core memory
+ */
+u32 octeon_read_device_mem32(struct octeon_device *oct, u64 core_addr);
+
+/** Write a 32-bit value to a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to write to.
+ * @param val - 32-bit value to write.
+ */
+void
+octeon_write_device_mem32(struct octeon_device *oct,
+ u64 core_addr,
+ u32 val);
+
+/** Read multiple bytes from Octeon memory.
+ */
+void
+octeon_pci_read_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len);
+
+/** Write multiple bytes into Octeon memory.
+ */
+void
+octeon_pci_write_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len);
+
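A hypothetical sketch of the word-sized accessors in use; the core address below is a placeholder for illustration, not a real firmware location:

	/* Hypothetical sketch: write a 32-bit value and read it back. */
	static inline int example_core_mem_roundtrip(struct octeon_device *oct)
	{
		u64 core_addr = 0x20000000ULL;	/* placeholder address */

		octeon_write_device_mem32(oct, core_addr, 0xdeadbeef);
		return octeon_read_device_mem32(oct, core_addr) == 0xdeadbeef;
	}
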
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
new file mode 100644
index 000000000000..b3abe5818fd3
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -0,0 +1,224 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_network.h
+ * \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
+ */
+
+#ifndef __OCTEON_NETWORK_H__
+#define __OCTEON_NETWORK_H__
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/ptp_clock_kernel.h>
+
+/** LiquidIO per-interface network private data */
+struct lio {
+ /** State of the interface. Rx/Tx happens only in the RUNNING state. */
+ atomic_t ifstate;
+
+ /** Octeon Interface index number. This device will be represented as
+ * oct<ifidx> in the system.
+ */
+ int ifidx;
+
+ /** Octeon Input queue to use to transmit for this network interface. */
+ int txq;
+
+ /** Octeon Output queue from which pkts arrive
+ * for this network interface.
+ */
+ int rxq;
+
+ /** Guards the glist */
+ spinlock_t lock;
+
+ /** Linked list of gather components */
+ struct list_head glist;
+
+ /** Pointer to the NIC properties for the Octeon device this network
+ * interface is associated with.
+ */
+ struct octdev_props *octprops;
+
+ /** Pointer to the octeon device structure. */
+ struct octeon_device *oct_dev;
+
+ struct net_device *netdev;
+
+ /** Link information sent by the core application for this interface. */
+ struct oct_link_info linfo;
+
+ /** Size of Tx queue for this octeon device. */
+ u32 tx_qsize;
+
+ /** Size of Rx queue for this octeon device. */
+ u32 rx_qsize;
+
+ /** MTU of this octeon device. */
+ u32 mtu;
+
+ /** msg level flag per interface. */
+ u32 msg_enable;
+
+ /** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
+ u64 dev_capability;
+
+ /** Copy of beacon reg in phy */
+ u32 phy_beacon_val;
+
+ /** Copy of ctrl reg in phy */
+ u32 led_ctrl_val;
+
+ /* PTP clock information */
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ s64 ptp_adjust;
+
+ /* for atomic access to Octeon PTP reg and data struct */
+ spinlock_t ptp_lock;
+
+ /* Interface info */
+ u32 intf_open;
+
+ /* work queue for txq status */
+ struct cavium_wq txq_status_wq;
+
+};
+
+#define LIO_SIZE (sizeof(struct lio))
+#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
+
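A hypothetical sketch of how a netdev callback reaches this private area through GET_LIO():

	/* Hypothetical sketch: an ndo_open-style handler using GET_LIO(). */
	static int example_ndo_open(struct net_device *netdev)
	{
		struct lio *lio = GET_LIO(netdev);

		lio->intf_open = 1;
		netif_start_queue(netdev);
		return 0;
	}
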
+/**
+ * \brief Enable or disable feature
+ * @param netdev pointer to network device
+ * @param cmd Command that just requires acknowledgment
+ */
+int liquidio_set_feature(struct net_device *netdev, int cmd);
+
+/**
+ * \brief Link control command completion callback
+ * @param nctrl_ptr pointer to control packet structure
+ *
+ * This routine is called by the callback function when a ctrl pkt sent to
+ * core app completes. The nctrl_ptr contains a copy of the command type
+ * and data sent to the core app. This routine is only called if the ctrl
+ * pkt was sent successfully to the core app.
+ */
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
+
+/**
+ * \brief Register ethtool operations
+ * @param netdev pointer to network device
+ */
+void liquidio_set_ethtool_ops(struct net_device *netdev);
+
+static inline void
+*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)),
+ u32 q_no __attribute__((unused)), u32 size)
+{
+#define SKB_ADJ_MASK 0x3F
+#define SKB_ADJ (SKB_ADJ_MASK + 1)
+
+ struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ return (void *)skb;
+}
+
+static inline void recv_buffer_free(void *buffer)
+{
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
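recv_buffer_alloc() reserves just enough headroom to land skb->data on the next 64-byte boundary; the adjustment arithmetic as a standalone demo:

	#include <stdio.h>

	/* Reserve (64 - (data % 64)) bytes to reach a 64-byte boundary. */
	int main(void)
	{
		unsigned long data = 0x1010;
		unsigned long mask = 0x3f;
		unsigned long r = (mask + 1) - (data & mask);

		printf("reserve %lu -> 0x%lx\n", r, data + r); /* 48 -> 0x1040 */
		return 0;
	}
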
+#define lio_dma_alloc(oct, size, dma_addr) \
+ dma_alloc_coherent(&oct->pci_dev->dev, size, dma_addr, GFP_KERNEL)
+#define lio_dma_free(oct, size, virt_addr, dma_addr) \
+ dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
+
+#define get_rbd(ptr) (((struct sk_buff *)(ptr))->data)
+
+static inline u64
+lio_map_ring_info(struct octeon_droq *droq, u32 i)
+{
+ dma_addr_t dma_addr;
+ struct octeon_device *oct = droq->oct_dev;
+
+ dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
+ OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
+
+ BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
+
+ return (u64)dma_addr;
+}
+
+static inline void
+lio_unmap_ring_info(struct pci_dev *pci_dev,
+ u64 info_ptr, u32 size)
+{
+ dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
+}
+
+static inline u64
+lio_map_ring(struct pci_dev *pci_dev,
+ void *buf, u32 size)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size,
+ DMA_FROM_DEVICE);
+
+ BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr));
+
+ return (u64)dma_addr;
+}
+
+static inline void
+lio_unmap_ring(struct pci_dev *pci_dev,
+ u64 buf_ptr, u32 size)
+{
+ dma_unmap_single(&pci_dev->dev,
+ buf_ptr, size,
+ DMA_FROM_DEVICE);
+}
+
+static inline void *octeon_fast_packet_alloc(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 q_no, u32 size)
+{
+ return recv_buffer_alloc(oct, q_no, size);
+}
+
+static inline void octeon_fast_packet_next(struct octeon_droq *droq,
+ struct sk_buff *nicbuf,
+ int copy_len,
+ int idx)
+{
+ memcpy(skb_put(nicbuf, copy_len),
+ get_rbd(droq->recv_buf_list[idx].buffer), copy_len);
+}
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
new file mode 100644
index 000000000000..1a0191549cb3
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -0,0 +1,189 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+void *
+octeon_alloc_soft_command_resp(struct octeon_device *oct,
+ struct octeon_instr_64B *cmd,
+ size_t rdatasize)
+{
+ struct octeon_soft_command *sc;
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, 0, rdatasize, 0);
+
+ if (!sc)
+ return NULL;
+
+ /* Copy existing command structure into the soft command */
+ memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B));
+
+ /* Add in the response related fields. Opcode and Param are already
+ * there.
+ */
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh->rflag = 1; /* a response is required */
+ irh->len = 4; /* means four 64-bit words immediately follow irh */
+
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = rdatasize;
+
+ *sc->status_word = COMPLETION_WORD_INIT;
+
+ sc->wait_time = 1000;
+ sc->timeout = jiffies + sc->wait_time;
+
+ return sc;
+}
+
+int octnet_send_nic_data_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata,
+ u32 xmit_more)
+{
+ int ring_doorbell;
+
+ ring_doorbell = !xmit_more;
+
+ return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,
+ ndata->buf, ndata->datasize,
+ ndata->reqtype);
+}
+
+static void octnet_link_ctrl_callback(struct octeon_device *oct,
+ u32 status,
+ void *sc_ptr)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)sc_ptr;
+ struct octnic_ctrl_pkt *nctrl;
+
+ nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr;
+
+ /* Call the callback function if status is OK.
+ * Status is OK only if a response was expected and core returned
+ * success.
+ * If no response was expected, status is OK if the command was posted
+ * successfully.
+ */
+ if (!status && nctrl->cb_fn)
+ nctrl->cb_fn(nctrl);
+
+ octeon_free_soft_command(oct, sc);
+}
+
+static inline struct octeon_soft_command
+*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl,
+ struct octnic_ctrl_params nparams)
+{
+ struct octeon_soft_command *sc = NULL;
+ u8 *data;
+ size_t rdatasize;
+ u32 uddsize = 0, datasize = 0;
+
+ uddsize = (u32)(nctrl->ncmd.s.more * 8);
+
+ datasize = OCTNET_CMD_SIZE + uddsize;
+ rdatasize = (nctrl->wait_time) ? 16 : 0;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, datasize, rdatasize,
+ sizeof(struct octnic_ctrl_pkt));
+
+ if (!sc)
+ return NULL;
+
+ memcpy(sc->ctxptr, nctrl, sizeof(struct octnic_ctrl_pkt));
+
+ data = (u8 *)sc->virtdptr;
+
+ memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
+
+ octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));
+
+ if (uddsize) {
+ /* Endian-Swap for UDD should have been done by caller. */
+ memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
+ }
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
+ 0, 0, 0);
+
+ sc->callback = octnet_link_ctrl_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = nctrl->wait_time;
+
+ return sc;
+}
+
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl,
+ struct octnic_ctrl_params nparams)
+{
+ int retval;
+ struct octeon_soft_command *sc = NULL;
+
+ sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
+ __func__);
+ return -1;
+ }
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval) {
+ octeon_free_soft_command(oct, sc);
+ dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
+ __func__, retval);
+ return -1;
+ }
+
+ return retval;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
new file mode 100644
index 000000000000..0238857c8105
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -0,0 +1,227 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_nic.h
+ * \brief Host NIC Driver: Routine to send network data &
+ * control packet to Octeon.
+ */
+
+#ifndef __OCTEON_NIC_H__
+#define __OCTEON_NIC_H__
+
+/* Maximum number of 8-byte words that can be sent in a NIC control message.
+ */
+#define MAX_NCTRL_UDD 32
+
+typedef void (*octnic_ctrl_pkt_cb_fn_t) (void *);
+
+/* Structure of control information passed by the NIC module to the OSI
+ * layer when sending control commands to Octeon device software.
+ */
+struct octnic_ctrl_pkt {
+ /** Command to be passed to the Octeon device software. */
+ union octnet_cmd ncmd;
+
+ /** Send buffer */
+ void *data;
+ u64 dmadata;
+
+ /** Response buffer */
+ void *rdata;
+ u64 dmardata;
+
+ /** Additional data that may be needed by some commands. */
+ u64 udd[MAX_NCTRL_UDD];
+
+ /** Time to wait for Octeon software to respond to this control command.
+ * If wait_time is 0, OSI assumes no response is expected.
+ */
+ size_t wait_time;
+
+ /** The network device that issued the control command. */
+ u64 netpndev;
+
+ /** Callback function called when the command has been fetched */
+ octnic_ctrl_pkt_cb_fn_t cb_fn;
+};
+
+#define MAX_UDD_SIZE(nctrl) (sizeof(nctrl->udd))
+
+/** Structure of data information passed by the NIC module to the OSI
+ * layer when forwarding data to Octeon device software.
+ */
+struct octnic_data_pkt {
+ /** Pointer to information maintained by NIC module for this packet. The
+ * OSI layer passes this as-is to the driver.
+ */
+ void *buf;
+
+ /** Type of buffer passed in "buf" above. */
+ u32 reqtype;
+
+ /** Total data bytes to be transferred in this command. */
+ u32 datasize;
+
+ /** Command to be passed to the Octeon device software. */
+ struct octeon_instr_64B cmd;
+
+ /** Input queue to use to send this command. */
+ u32 q_no;
+
+};
+
+/** Structure passed by NIC module to OSI layer to prepare a command to send
+ * network data to Octeon.
+ */
+union octnic_cmd_setup {
+ struct {
+ u32 ifidx:8;
+ u32 cksum_offset:7;
+ u32 gather:1;
+ u32 timestamp:1;
+ u32 ipv4opts_ipv6exthdr:2;
+ u32 ip_csum:1;
+ u32 tnl_csum:1;
+
+ u32 rsvd:11;
+ union {
+ u32 datasize;
+ u32 gatherptrs;
+ } u;
+ } s;
+
+ u64 u64;
+
+};
+
+struct octnic_ctrl_params {
+ u32 resp_order;
+};
+
+static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
+{
+ return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
+ >= (oct->instr_queue[q_no]->max_count - 2));
+}
+
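A hypothetical transmit-path sketch showing where this check would sit:

	/* Hypothetical sketch: stop the netdev queue when the IQ is full. */
	static inline int example_xmit_check(struct octeon_device *oct,
					     struct net_device *netdev,
					     u32 q_no)
	{
		if (octnet_iq_is_full(oct, q_no)) {
			netif_stop_queue(netdev);
			return -EBUSY;
		}
		return 0;
	}
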
+/** Utility function to prepare a 64B NIC instruction based on a setup command
+ * @param cmd - pointer to instruction to be filled in.
+ * @param setup - pointer to the setup structure
+ * @param tag - tag to use for this command; if zero, a default tag based
+ * on the interface index is used.
+ *
+ * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
+ */
+static inline void
+octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ union octnic_packet_params packet_params;
+
+ memset(cmd, 0, sizeof(struct octeon_instr_64B));
+
+ ih = (struct octeon_instr_ih *)&cmd->ih;
+
+ /* assume that rflag is cleared so therefore front data will only have
+ * irh and ossp[0] and ossp[1] for a total of 24 bytes
+ */
+ ih->fsz = 24;
+
+ ih->tagtype = ORDERED_TAG;
+ ih->grp = DEFAULT_POW_GRP;
+
+ if (tag)
+ ih->tag = tag;
+ else
+ ih->tag = LIO_DATA(setup->s.ifidx);
+
+ ih->raw = 1;
+ ih->qos = (setup->s.ifidx & 3) + 4; /* map qos based on interface */
+
+ if (!setup->s.gather) {
+ ih->dlengsz = setup->s.u.datasize;
+ } else {
+ ih->gather = 1;
+ ih->dlengsz = setup->s.u.gatherptrs;
+ }
+
+ irh = (struct octeon_instr_irh *)&cmd->irh;
+
+ irh->opcode = OPCODE_NIC;
+ irh->subcode = OPCODE_NIC_NW_DATA;
+
+ packet_params.u32 = 0;
+
+ if (setup->s.cksum_offset) {
+ packet_params.s.csoffset = setup->s.cksum_offset;
+ packet_params.s.ipv4opts_ipv6exthdr =
+ setup->s.ipv4opts_ipv6exthdr;
+ }
+
+ packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.tnl_csum = setup->s.tnl_csum;
+ packet_params.s.ifidx = setup->s.ifidx;
+ packet_params.s.tsflag = setup->s.timestamp;
+
+ irh->ossp = packet_params.u32;
+}
+
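A hypothetical caller of octnet_prepare_pci_cmd(), building a command for a single linear (non-gather) transmit buffer; the interface index and checksum settings are placeholders:

	/* Hypothetical sketch: all field values below are placeholders. */
	static inline void example_prepare_tx_cmd(struct octeon_instr_64B *cmd,
						  u32 bytes)
	{
		union octnic_cmd_setup setup;

		memset(&setup, 0, sizeof(setup));
		setup.s.ifidx = 0;
		setup.s.ip_csum = 1;		/* offload IP header checksum */
		setup.s.u.datasize = bytes;	/* gather == 0: one buffer */

		octnet_prepare_pci_cmd(cmd, &setup, 0 /* use default tag */);
	}
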
+/** Allocate a soft command with space for a response immediately following
+ * the command.
+ * @param oct - octeon device pointer
+ * @param cmd - pointer to the command structure, pre-filled for everything
+ * except the response.
+ * @param rdatasize - size in bytes of the response.
+ *
+ * @returns pointer to allocated buffer with command copied into it, and
+ * response space immediately following.
+ */
+void *
+octeon_alloc_soft_command_resp(struct octeon_device *oct,
+ struct octeon_instr_64B *cmd,
+ size_t rdatasize);
+
+/** Send a NIC data packet to the device
+ * @param oct - octeon device pointer
+ * @param ndata - control structure with queueing, and buffer information
+ *
+ * @returns IQ_SEND_FAILED if it failed to add to the input queue,
+ * IQ_SEND_STOP if the queue should be stopped, and IQ_SEND_OK if it sent okay.
+ */
+int octnet_send_nic_data_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata, u32 xmit_more);
+
+/** Send a NIC control packet to the device
+ * @param oct - octeon device pointer
+ * @param nctrl - control structure with command, timeout, and callback info
+ * @param nparams - response control structure
+ *
+ * @returns IQ_SEND_FAILED if it failed to add to the input queue,
+ * IQ_SEND_STOP if the queue should be stopped, and IQ_SEND_OK if it sent okay.
+ */
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl,
+ struct octnic_ctrl_params nparams);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
new file mode 100644
index 000000000000..356796bf9b87
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -0,0 +1,766 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
+ (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
+
+struct iq_post_status {
+ int status;
+ int index;
+};
+
+static void check_db_timeout(struct work_struct *work);
+static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);
+
+static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
+
+static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
+{
+ struct octeon_instr_queue *iq =
+ (struct octeon_instr_queue *)oct->instr_queue[iq_no];
+ return iq->iqcmd_64B;
+}
+
+#define IQ_INSTR_MODE_32B(oct, iq_no) (!IQ_INSTR_MODE_64B(oct, iq_no))
+
+/* Define this to return the request status compatible with old code */
+/*#define OCTEON_USE_OLD_REQ_STATUS*/
+
+/* Return 0 on success, 1 on failure */
+int octeon_init_instr_queue(struct octeon_device *oct,
+ u32 iq_no, u32 num_descs)
+{
+ struct octeon_instr_queue *iq;
+ struct octeon_iq_config *conf = NULL;
+ u32 q_size;
+ struct cavium_wq *db_wq;
+
+ if (OCTEON_CN6XXX(oct))
+ conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
+
+ if (!conf) {
+ dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
+ oct->chip_id);
+ return 1;
+ }
+
+ if (num_descs & (num_descs - 1)) {
+ dev_err(&oct->pci_dev->dev,
+ "Number of descriptors for instr queue %d not in power of 2.\n",
+ iq_no);
+ return 1;
+ }
+
+ q_size = (u32)conf->instr_type * num_descs;
+
+ iq = oct->instr_queue[iq_no];
+
+ iq->base_addr = lio_dma_alloc(oct, q_size,
+ (dma_addr_t *)&iq->base_addr_dma);
+ if (!iq->base_addr) {
+ dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
+ iq_no);
+ return 1;
+ }
+
+ iq->max_count = num_descs;
+
+ /* Initialize a list to hold requests that have been posted to Octeon
+ * but have yet to be fetched by Octeon
+ */
+ iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
+ if (!iq->request_list) {
+ lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
+ dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
+ iq_no);
+ return 1;
+ }
+
+ memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
+
+ dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
+ iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
+
+ iq->iq_no = iq_no;
+ iq->fill_threshold = (u32)conf->db_min;
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octeon_read_index = 0;
+ iq->flush_index = 0;
+ iq->last_db_time = 0;
+ iq->do_auto_flush = 1;
+ iq->db_timeout = (u32)conf->db_timeout;
+ atomic_set(&iq->instr_pending, 0);
+
+ /* Initialize the spinlock for this instruction queue */
+ spin_lock_init(&iq->lock);
+
+ oct->io_qmask.iq |= (1 << iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+ oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+
+ oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
+ if (!oct->check_db_wq[iq_no].wq) {
+ lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
+ dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
+ iq_no);
+ return 1;
+ }
+
+ db_wq = &oct->check_db_wq[iq_no];
+
+ INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
+ db_wq->wk.ctxptr = oct;
+ db_wq->wk.ctxul = iq_no;
+ queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+
+ return 0;
+}
+
+int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
+{
+ u64 desc_size = 0, q_size;
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
+ flush_workqueue(oct->check_db_wq[iq_no].wq);
+ destroy_workqueue(oct->check_db_wq[iq_no].wq);
+
+ if (OCTEON_CN6XXX(oct))
+ desc_size =
+ CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
+
+ if (iq->request_list)
+ vfree(iq->request_list);
+
+ if (iq->base_addr) {
+ q_size = iq->max_count * desc_size;
+ lio_dma_free(oct, (u32)q_size, iq->base_addr,
+ iq->base_addr_dma);
+ return 0;
+ }
+ return 1;
+}
+
+/* Return 0 on success, 1 on failure */
+int octeon_setup_iq(struct octeon_device *oct,
+ u32 iq_no,
+ u32 num_descs,
+ void *app_ctx)
+{
+ if (oct->instr_queue[iq_no]) {
+ dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
+ iq_no);
+ oct->instr_queue[iq_no]->app_ctx = app_ctx;
+ return 0;
+ }
+ oct->instr_queue[iq_no] =
+ vmalloc(sizeof(struct octeon_instr_queue));
+ if (!oct->instr_queue[iq_no])
+ return 1;
+
+ memset(oct->instr_queue[iq_no], 0,
+ sizeof(struct octeon_instr_queue));
+
+ oct->instr_queue[iq_no]->app_ctx = app_ctx;
+ if (octeon_init_instr_queue(oct, iq_no, num_descs)) {
+ vfree(oct->instr_queue[iq_no]);
+ oct->instr_queue[iq_no] = NULL;
+ return 1;
+ }
+
+ oct->num_iqs++;
+ oct->fn_list.enable_io_queues(oct);
+ return 0;
+}
+
+int lio_wait_for_instr_fetch(struct octeon_device *oct)
+{
+ int i, retry = 1000, pending, instr_cnt = 0;
+
+ do {
+ instr_cnt = 0;
+
+ /*for (i = 0; i < oct->num_iqs; i++) {*/
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ if (!(oct->io_qmask.iq & (1UL << i)))
+ continue;
+ pending =
+ atomic_read(&oct->
+ instr_queue[i]->instr_pending);
+ if (pending)
+ __check_db_timeout(oct, i);
+ instr_cnt += pending;
+ }
+
+ if (instr_cnt == 0)
+ break;
+
+ schedule_timeout_uninterruptible(1);
+
+ } while (retry-- && instr_cnt);
+
+ return instr_cnt;
+}
+
+static inline void
+ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
+{
+ if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ /* make sure doorbell write goes through */
+ mmiowb();
+ iq->fill_cnt = 0;
+ iq->last_db_time = jiffies;
+ return;
+ }
+}
+
+static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
+ u8 *cmd)
+{
+ u8 *iqptr, cmdsize;
+
+ cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
+ iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
+
+ memcpy(iqptr, cmd, cmdsize);
+}
+
+static inline int
+__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
+ struct octeon_instr_queue *iq,
+ u32 force_db __attribute__((unused)), u8 *cmd)
+{
+ u32 index = -1;
+
+ /* This ensures that the read index does not wrap around to the same
+ * position if queue gets full before Octeon could fetch any instr.
+ */
+ if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
+ return -1;
+
+ __copy_cmd_into_iq(iq, cmd);
+
+ /* "index" is returned, host_write_index is modified. */
+ index = iq->host_write_index;
+ INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
+ iq->fill_cnt++;
+
+ /* Flush the command into memory. We need to be sure the data is in
+ * memory before indicating that the instruction is pending.
+ */
+ wmb();
+
+ atomic_inc(&iq->instr_pending);
+
+ return index;
+}
+
+static inline struct iq_post_status
+__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
+ struct octeon_instr_queue *iq,
+ u32 force_db __attribute__((unused)), u8 *cmd)
+{
+ struct iq_post_status st;
+
+ st.status = IQ_SEND_OK;
+
+ /* This ensures that the read index does not wrap around to the same
+ * position if queue gets full before Octeon could fetch any instr.
+ */
+ if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
+ st.status = IQ_SEND_FAILED;
+ st.index = -1;
+ return st;
+ }
+
+ if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
+ st.status = IQ_SEND_STOP;
+
+ __copy_cmd_into_iq(iq, cmd);
+
+ /* "index" is returned, host_write_index is modified. */
+ st.index = iq->host_write_index;
+ INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
+ iq->fill_cnt++;
+
+ /* Flush the command into memory. We need to be sure the data is in
+ * memory before indicating that the instruction is pending.
+ */
+ wmb();
+
+ atomic_inc(&iq->instr_pending);
+
+ return st;
+}
+
+int
+octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
+ void (*fn)(void *))
+{
+ if (reqtype > REQTYPE_LAST) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
+ __func__, reqtype);
+ return -EINVAL;
+ }
+
+ reqtype_free_fn[oct->octeon_id][reqtype] = fn;
+
+ return 0;
+}
+
+static inline void
+__add_to_request_list(struct octeon_instr_queue *iq,
+ int idx, void *buf, int reqtype)
+{
+ iq->request_list[idx].buf = buf;
+ iq->request_list[idx].reqtype = reqtype;
+}
+
+int
+lio_process_iq_request_list(struct octeon_device *oct,
+ struct octeon_instr_queue *iq)
+{
+ int reqtype;
+ void *buf;
+ u32 old = iq->flush_index;
+ u32 inst_count = 0;
+ unsigned pkts_compl = 0, bytes_compl = 0;
+ struct octeon_soft_command *sc;
+ struct octeon_instr_irh *irh;
+
+ while (old != iq->octeon_read_index) {
+ reqtype = iq->request_list[old].reqtype;
+ buf = iq->request_list[old].buf;
+
+ if (reqtype == REQTYPE_NONE)
+ goto skip_this;
+
+ octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
+ &bytes_compl);
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ case REQTYPE_RESP_NET_SG:
+ reqtype_free_fn[oct->octeon_id][reqtype](buf);
+ break;
+ case REQTYPE_RESP_NET:
+ case REQTYPE_SOFT_COMMAND:
+ sc = buf;
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ if (irh->rflag) {
+ /* We're expecting a response from Octeon.
+ * It's up to lio_process_ordered_list() to
+ * process sc. Add sc to the ordered soft
+ * command response list because we expect
+ * a response from Octeon.
+ */
+ spin_lock_bh(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock);
+ atomic_inc(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].
+ pending_req_count);
+ list_add_tail(&sc->node, &oct->response_list
+ [OCTEON_ORDERED_SC_LIST].head);
+ spin_unlock_bh(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock);
+ } else {
+ if (sc->callback) {
+ sc->callback(oct, OCTEON_REQUEST_DONE,
+ sc->callback_arg);
+ }
+ }
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev,
+ "%s Unknown reqtype: %d buf: %p at idx %d\n",
+ __func__, reqtype, buf, old);
+ }
+
+ iq->request_list[old].buf = NULL;
+ iq->request_list[old].reqtype = 0;
+
+ skip_this:
+ inst_count++;
+ INCR_INDEX_BY1(old, iq->max_count);
+ }
+ if (bytes_compl)
+ octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
+ bytes_compl);
+ iq->flush_index = old;
+
+ return inst_count;
+}
+
+static inline void
+update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
+{
+ u32 inst_processed = 0;
+
+ /* Calculate how many commands Octeon has read and move the read index
+ * accordingly.
+ */
+ iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);
+
+ /* Move the NORESPONSE requests to the per-device completion list. */
+ if (iq->flush_index != iq->octeon_read_index)
+ inst_processed = lio_process_iq_request_list(oct, iq);
+
+ if (inst_processed) {
+ atomic_sub(inst_processed, &iq->instr_pending);
+ iq->stats.instr_processed += inst_processed;
+ }
+}
+
+static void
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+ u32 pending_thresh)
+{
+ if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
+ spin_lock_bh(&iq->lock);
+ update_iq_indices(oct, iq);
+ spin_unlock_bh(&iq->lock);
+ }
+}
+
+static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
+{
+ struct octeon_instr_queue *iq;
+ u64 next_time;
+
+ if (!oct)
+ return;
+ iq = oct->instr_queue[iq_no];
+ if (!iq)
+ return;
+
+ /* If jiffies - last_db_time < db_timeout do nothing */
+ next_time = iq->last_db_time + iq->db_timeout;
+ if (!time_after(jiffies, (unsigned long)next_time))
+ return;
+ iq->last_db_time = jiffies;
+
+ /* Get the lock and prevent tasklets. This routine gets called from
+ * the poll thread. Instructions can now be posted in tasklet context
+ */
+ spin_lock_bh(&iq->lock);
+ if (iq->fill_cnt != 0)
+ ring_doorbell(oct, iq);
+
+ spin_unlock_bh(&iq->lock);
+
+ /* Flush the instruction queue */
+ if (iq->do_auto_flush)
+ octeon_flush_iq(oct, iq, 1);
+}
+
+/* Called by the Poll thread at regular intervals to check the instruction
+ * queue for commands to be posted and for commands that were fetched by Octeon.
+ */
+static void check_db_timeout(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ unsigned long iq_no = wk->ctxul;
+ struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
+
+ __check_db_timeout(oct, iq_no);
+ queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+}
+
+int
+octeon_send_command(struct octeon_device *oct, u32 iq_no,
+ u32 force_db, void *cmd, void *buf,
+ u32 datasize, u32 reqtype)
+{
+ struct iq_post_status st;
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ spin_lock_bh(&iq->lock);
+
+ st = __post_command2(oct, iq, force_db, cmd);
+
+ if (st.status != IQ_SEND_FAILED) {
+ octeon_report_sent_bytes_to_bql(buf, reqtype);
+ __add_to_request_list(iq, st.index, buf, reqtype);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
+
+ if (iq->fill_cnt >= iq->fill_threshold || force_db)
+ ring_doorbell(oct, iq);
+ } else {
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
+ }
+
+ spin_unlock_bh(&iq->lock);
+
+ if (iq->do_auto_flush)
+ octeon_flush_iq(oct, iq, 2);
+
+ return st.status;
+}
+
+void
+octeon_prepare_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc,
+ u8 opcode,
+ u8 subcode,
+ u32 irh_ossp,
+ u64 ossp0,
+ u64 ossp1)
+{
+ struct octeon_config *oct_cfg;
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ BUG_ON(opcode > 15);
+ BUG_ON(subcode > 127);
+
+ oct_cfg = octeon_get_conf(oct);
+
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ ih->tagtype = ATOMIC_TAG;
+ ih->tag = LIO_CONTROL;
+ ih->raw = 1;
+ ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
+
+ if (sc->datasize) {
+ ih->dlengsz = sc->datasize;
+ ih->rs = 1;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.ossp[0] = ossp0;
+ sc->cmd.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = sc->rdatasize;
+
+ irh->rflag = 1;
+ irh->len = 4;
+ ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+ } else {
+ irh->rflag = 0;
+ irh->len = 2;
+ ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
+ }
+
+ while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
+ sc->iq_no++;
+}
+
+int octeon_send_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc)
+{
+ struct octeon_instr_ih *ih;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+ if (ih->dlengsz) {
+ BUG_ON(!sc->dmadptr);
+ sc->cmd.dptr = sc->dmadptr;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ if (irh->rflag) {
+ BUG_ON(!sc->dmarptr);
+ BUG_ON(!sc->status_word);
+ *sc->status_word = COMPLETION_WORD_INIT;
+
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+ sc->cmd.rptr = sc->dmarptr;
+ }
+
+ if (sc->wait_time)
+ sc->timeout = jiffies + sc->wait_time;
+
+ return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
+ (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
+}
+
+int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
+{
+ int i;
+ u64 dma_addr;
+ struct octeon_soft_command *sc;
+
+ INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+ spin_lock_init(&oct->sc_buf_pool.lock);
+ atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);
+
+ for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
+ sc = (struct octeon_soft_command *)
+ lio_dma_alloc(oct,
+ SOFT_COMMAND_BUFFER_SIZE,
+ (dma_addr_t *)&dma_addr);
+ if (!sc)
+ return 1;
+
+ sc->dma_addr = dma_addr;
+ sc->size = SOFT_COMMAND_BUFFER_SIZE;
+
+ list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+ }
+
+ return 0;
+}
+
+int octeon_free_sc_buffer_pool(struct octeon_device *oct)
+{
+ struct list_head *tmp, *tmp2;
+ struct octeon_soft_command *sc;
+
+ spin_lock(&oct->sc_buf_pool.lock);
+
+ list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
+ list_del(tmp);
+
+ sc = (struct octeon_soft_command *)tmp;
+
+ lio_dma_free(oct, sc->size, sc, sc->dma_addr);
+ }
+
+ INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+
+ spin_unlock(&oct->sc_buf_pool.lock);
+
+ return 0;
+}
+
+struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
+ u32 datasize,
+ u32 rdatasize,
+ u32 ctxsize)
+{
+ u64 dma_addr;
+ u32 size;
+ u32 offset = sizeof(struct octeon_soft_command);
+ struct octeon_soft_command *sc = NULL;
+ struct list_head *tmp;
+
+ BUG_ON((offset + datasize + rdatasize + ctxsize) >
+ SOFT_COMMAND_BUFFER_SIZE);
+
+ spin_lock(&oct->sc_buf_pool.lock);
+
+ if (list_empty(&oct->sc_buf_pool.head)) {
+ spin_unlock(&oct->sc_buf_pool.lock);
+ return NULL;
+ }
+
+ list_for_each(tmp, &oct->sc_buf_pool.head)
+ break;
+
+ list_del(tmp);
+
+ atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
+
+ spin_unlock(&oct->sc_buf_pool.lock);
+
+ sc = (struct octeon_soft_command *)tmp;
+
+ dma_addr = sc->dma_addr;
+ size = sc->size;
+
+ memset(sc, 0, sc->size);
+
+ sc->dma_addr = dma_addr;
+ sc->size = size;
+
+ if (ctxsize) {
+ sc->ctxptr = (u8 *)sc + offset;
+ sc->ctxsize = ctxsize;
+ }
+
+ /* Start data at 128 byte boundary */
+ offset = (offset + ctxsize + 127) & 0xffffff80;
+
+ if (datasize) {
+ sc->virtdptr = (u8 *)sc + offset;
+ sc->dmadptr = dma_addr + offset;
+ sc->datasize = datasize;
+ }
+
+ /* Start rdata at 128 byte boundary */
+ offset = (offset + datasize + 127) & 0xffffff80;
+
+ if (rdatasize) {
+ BUG_ON(rdatasize < 16);
+ sc->virtrptr = (u8 *)sc + offset;
+ sc->dmarptr = dma_addr + offset;
+ sc->rdatasize = rdatasize;
+ sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
+ }
+
+ return sc;
+}
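+
+/* Worked example of the round-up above (illustrative, not driver code):
+ * "(x + 127) & 0xffffff80" aligns x up to the next 128-byte boundary,
+ * e.g. x = 200 gives (200 + 127) & ~127 = 256. For these 32-bit offsets
+ * it is equivalent to the kernel's ALIGN(x, 128).
+ */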
+
+void octeon_free_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc)
+{
+ spin_lock(&oct->sc_buf_pool.lock);
+
+ list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+
+ atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
+
+ spin_unlock(&oct->sc_buf_pool.lock);
+}
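+
+/* Usage sketch (illustrative) of the soft-command path defined above:
+ *
+ * sc = octeon_alloc_soft_command(oct, datasize, rdatasize, ctxsize);
+ * if (!sc)
+ * return -ENOMEM;
+ * octeon_prepare_soft_command(oct, sc, opcode, subcode, irh_ossp,
+ * ossp0, ossp1);
+ * if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED)
+ * octeon_free_soft_command(oct, sc);
+ */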
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
new file mode 100644
index 000000000000..091f537a946e
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -0,0 +1,178 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+static void oct_poll_req_completion(struct work_struct *work);
+
+int octeon_setup_response_list(struct octeon_device *oct)
+{
+ int i, ret = 0;
+ struct cavium_wq *cwq;
+
+ for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
+ INIT_LIST_HEAD(&oct->response_list[i].head);
+ spin_lock_init(&oct->response_list[i].lock);
+ atomic_set(&oct->response_list[i].pending_req_count, 0);
+ }
+
+ oct->dma_comp_wq.wq = create_workqueue("dma-comp");
+ if (!oct->dma_comp_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
+ return -ENOMEM;
+ }
+
+ cwq = &oct->dma_comp_wq;
+ INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
+ cwq->wk.ctxptr = oct;
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+
+ return ret;
+}
+
+void octeon_delete_response_list(struct octeon_device *oct)
+{
+ cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
+ flush_workqueue(oct->dma_comp_wq.wq);
+ destroy_workqueue(oct->dma_comp_wq.wq);
+}
+
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+ u32 force_quit)
+{
+ struct octeon_response_list *ordered_sc_list;
+ struct octeon_soft_command *sc;
+ int request_complete = 0;
+ int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
+ u32 status;
+ u64 status64;
+ struct octeon_instr_rdp *rdp;
+
+ ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
+
+ do {
+ spin_lock_bh(&ordered_sc_list->lock);
+
+ if (list_empty(&ordered_sc_list->head)) {
+ /* ordered_sc_list is empty; there is nothing to process */
+ spin_unlock_bh(&ordered_sc_list->lock);
+ return 1;
+ }
+
+ sc = (struct octeon_soft_command *)ordered_sc_list->head.next;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+ status = OCTEON_REQUEST_PENDING;
+
+ /* check if octeon has finished DMA'ing a response
+ * to where rptr is pointing
+ */
+ dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
+ sc->cmd.rptr, rdp->rlen,
+ DMA_FROM_DEVICE);
+ status64 = *sc->status_word;
+
+ if (status64 != COMPLETION_WORD_INIT) {
+ if ((status64 & 0xff) != 0xff) {
+ octeon_swap_8B_data(&status64, 1);
+ if ((status64 & 0xff) != 0xff)
+ status = (u32)(status64 & 0xffffffffULL);
+ }
+ } else if (force_quit || (sc->timeout &&
+ time_after(jiffies, (unsigned long)sc->timeout))) {
+ status = OCTEON_REQUEST_TIMEOUT;
+ }
+
+ if (status != OCTEON_REQUEST_PENDING) {
+ /* we have received a response or we have timed out */
+ /* remove node from linked list */
+ list_del(&sc->node);
+ atomic_dec(&ordered_sc_list->pending_req_count);
+ spin_unlock_bh(&ordered_sc_list->lock);
+
+ if (sc->callback)
+ sc->callback(octeon_dev, status,
+ sc->callback_arg);
+
+ request_complete++;
+
+ } else {
+ /* no response yet */
+ request_complete = 0;
+ spin_unlock_bh(&ordered_sc_list->lock);
+ }
+
+ /* If we hit the max ordered requests to process in one pass, quit
+ * and let this function be invoked again the next time the poll
+ * thread runs, to handle the remaining requests. Without this upper
+ * limit the function could monopolize the CPU.
+ */
+ if (request_complete >= resp_to_process)
+ break;
+ } while (request_complete);
+
+ return 0;
+}
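+
+/* Sketch of the completion protocol polled above (illustrative): the
+ * sender seeds the last 8 bytes of the response buffer with
+ * COMPLETION_WORD_INIT; once the word read back differs from the seed
+ * (checked in both byte orders via octeon_swap_8B_data()), the low 32
+ * bits carry the request status.
+ */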
+
+static void oct_poll_req_completion(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ struct cavium_wq *cwq = &oct->dma_comp_wq;
+
+ lio_process_ordered_list(oct, 0);
+
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
new file mode 100644
index 000000000000..7a48752dcb10
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -0,0 +1,140 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file response_manager.h
+ * \brief Host Driver: Response queues for host instructions.
+ */
+
+#ifndef __RESPONSE_MANAGER_H__
+#define __RESPONSE_MANAGER_H__
+
+/** Maximum ordered requests to process in every invocation of
+ * lio_process_ordered_list(). The function will continue to process requests
+ * as long as it can find one that has finished processing. If it keeps
+ * finding requests that have completed, the function can run forever. The
+ * value defined here sets an upper limit on the number of requests it can
+ * process before it returns control to the poll thread.
+ */
+#define MAX_ORD_REQS_TO_PROCESS 4096
+
+/** Head of a response list. There are several response lists in the
+ * system: one for each response order (ordered and unordered), and one
+ * for no-response entries on each instruction queue.
+ */
+struct octeon_response_list {
+ /** List head used to add and delete pending entries */
+ struct list_head head;
+
+ /** A lock for this response list */
+ spinlock_t lock;
+
+ atomic_t pending_req_count;
+};
+
+/** The type of response list.
+ */
+enum {
+ OCTEON_ORDERED_LIST = 0,
+ OCTEON_UNORDERED_NONBLOCKING_LIST = 1,
+ OCTEON_UNORDERED_BLOCKING_LIST = 2,
+ OCTEON_ORDERED_SC_LIST = 3
+};
+
+/** Response Order values for an Octeon Request. */
+enum {
+ OCTEON_RESP_ORDERED = 0,
+ OCTEON_RESP_UNORDERED = 1,
+ OCTEON_RESP_NORESPONSE = 2
+};
+
+/** Error codes used in Octeon Host-Core communication.
+ *
+ * 31            16 15             0
+ * ---------------------------------
+ * |  Major Number  |  Minor Number |
+ * ---------------------------------
+ * Error codes are 32 bits wide. The upper 16 bits, called the Major Error
+ * Number, identify the group to which the error code belongs. The lower
+ * 16 bits, called the Minor Error Number, carry the actual code.
+ *
+ * So an error code is (MAJOR_NUMBER << 16) | MINOR_NUMBER.
+ */
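+
+/* Illustrative helpers (not part of the driver) for the encoding just
+ * described:
+ *
+ * #define OCT_MAKE_ERROR(major, minor) (((major) << 16) | (minor))
+ * #define OCT_ERROR_MAJOR(code) (((code) >> 16) & 0xffff)
+ * #define OCT_ERROR_MINOR(code) ((code) & 0xffff)
+ */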
+
+/*------------ Error codes used by host driver -----------------*/
+#define DRIVER_MAJOR_ERROR_CODE 0x0000
+
+/** A value of 0x00000000 indicates no error i.e. success */
+#define DRIVER_ERROR_NONE 0x00000000
+
+/** (Major number: 0x0000; Minor Number: 0x0001) */
+#define DRIVER_ERROR_REQ_PENDING 0x00000001
+#define DRIVER_ERROR_REQ_TIMEOUT 0x00000003
+#define DRIVER_ERROR_REQ_EINTR 0x00000004
+#define DRIVER_ERROR_REQ_ENXIO 0x00000006
+#define DRIVER_ERROR_REQ_ENOMEM 0x0000000C
+#define DRIVER_ERROR_REQ_EINVAL 0x00000016
+#define DRIVER_ERROR_REQ_FAILED 0x000000ff
+
+/** Status for a request.
+ * If a request is not queued to Octeon by the driver, the driver returns
+ * an error condition that is described by one of the OCTEON_REQUEST_*
+ * values below. If the request is successfully queued, the driver returns
+ * an OCTEON_REQUEST_PENDING status. OCTEON_REQUEST_TIMEOUT is returned if
+ * the response for the request fails to arrive before the time-out period,
+ * and OCTEON_REQUEST_INTERRUPTED if the request processing is interrupted
+ * by a signal.
+ */
+enum {
+ OCTEON_REQUEST_DONE = (DRIVER_ERROR_NONE),
+ OCTEON_REQUEST_PENDING = (DRIVER_ERROR_REQ_PENDING),
+ OCTEON_REQUEST_TIMEOUT = (DRIVER_ERROR_REQ_TIMEOUT),
+ OCTEON_REQUEST_INTERRUPTED = (DRIVER_ERROR_REQ_EINTR),
+ OCTEON_REQUEST_NO_DEVICE = (0x00000021),
+ OCTEON_REQUEST_NOT_RUNNING,
+ OCTEON_REQUEST_INVALID_IQ,
+ OCTEON_REQUEST_INVALID_BUFCNT,
+ OCTEON_REQUEST_INVALID_RESP_ORDER,
+ OCTEON_REQUEST_NO_MEMORY,
+ OCTEON_REQUEST_INVALID_BUFSIZE,
+ OCTEON_REQUEST_NO_PENDING_ENTRY,
+ OCTEON_REQUEST_NO_IQ_SPACE = (0x7FFFFFFF)
+
+};
+
+/** Initialize the response lists. MAX_RESPONSE_LISTS lists are created.
+ * @param octeon_dev - the octeon device structure.
+ */
+int octeon_setup_response_list(struct octeon_device *octeon_dev);
+
+void octeon_delete_response_list(struct octeon_device *octeon_dev);
+
+/** Check the status of first entry in the ordered list. If the instruction at
+ * that entry finished processing or has timed-out, the entry is cleaned.
+ * @param octeon_dev - the octeon device structure.
+ * @param force_quit - the request is forced to timeout if this is 1
+ * @return 1 if the ordered list is empty, 0 otherwise.
+ */
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+ u32 force_quit);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
new file mode 100644
index 000000000000..5c4615ccaa14
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Cavium's Thunder ethernet device
+#
+
+obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
+obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
+obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
+
+nicpf-y := nic_main.o
+nicvf-y := nicvf_main.o nicvf_queues.o
+nicvf-y += nicvf_ethtool.o
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
new file mode 100644
index 000000000000..a3b43e50a576
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_H
+#define NIC_H
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include "thunder_bgx.h"
+
+/* PCI device IDs */
+#define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E
+#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011
+#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
+#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+#define PCI_MSIX_REG_BAR_NUM 4
+
+/* NIC SRIOV VF count */
+#define MAX_NUM_VFS_SUPPORTED 128
+#define DEFAULT_NUM_VF_ENABLED 8
+
+#define NIC_TNS_BYPASS_MODE 0
+#define NIC_TNS_MODE 1
+
+/* NIC priv flags */
+#define NIC_SRIOV_ENABLED BIT(0)
+
+/* Min/Max packet size */
+#define NIC_HW_MIN_FRS 64
+#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
+
+/* Max pkinds */
+#define NIC_MAX_PKIND 16
+
+/* Rx Channels */
+/* Receive channel configuration in TNS bypass mode:
+ * BGX0-LMAC0-CHAN0 - VNIC CHAN0
+ * BGX0-LMAC1-CHAN0 - VNIC CHAN16
+ * ...
+ * BGX1-LMAC0-CHAN0 - VNIC CHAN128
+ * ...
+ * BGX1-LMAC3-CHAN0 - VNIC CHAN176
+ */
+#define NIC_INTF_COUNT 2 /* Interfaces between VNIC and TNS/BGX */
+#define NIC_CHANS_PER_INF 128
+#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
+#define NIC_CPI_COUNT 2048 /* No of channel parse indices */
+
+/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
+#define NIC_MAX_BGX MAX_BGX_PER_CN88XX
+#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX)
+#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */
+#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX)
+
+/* Tx scheduling */
+#define NIC_MAX_TL4 1024
+#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */
+#define NIC_MAX_TL3 256
+#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */
+#define NIC_MAX_TL2 64
+#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */
+#define NIC_MAX_TL1 2
+
+/* TNS bypass mode */
+#define NIC_TL2_PER_BGX 32
+#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX)
+#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF)
+
+/* NIC VF Interrupts */
+#define NICVF_INTR_CQ 0
+#define NICVF_INTR_SQ 1
+#define NICVF_INTR_RBDR 2
+#define NICVF_INTR_PKT_DROP 3
+#define NICVF_INTR_TCP_TIMER 4
+#define NICVF_INTR_MBOX 5
+#define NICVF_INTR_QS_ERR 6
+
+#define NICVF_INTR_CQ_SHIFT 0
+#define NICVF_INTR_SQ_SHIFT 8
+#define NICVF_INTR_RBDR_SHIFT 16
+#define NICVF_INTR_PKT_DROP_SHIFT 20
+#define NICVF_INTR_TCP_TIMER_SHIFT 21
+#define NICVF_INTR_MBOX_SHIFT 22
+#define NICVF_INTR_QS_ERR_SHIFT 23
+
+#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
+#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
+#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
+#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
+#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
+#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
+#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
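+
+/* Sketch (illustrative): as the masks above imply, per-queue interrupt
+ * bits are consecutive starting at the corresponding shift, so the bit
+ * for completion queue "q" (q < 8) would be:
+ *
+ * u64 cq_bit = BIT_ULL(NICVF_INTR_CQ_SHIFT + q);
+ */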
+
+/* MSI-X interrupts */
+#define NIC_PF_MSIX_VECTORS 10
+#define NIC_VF_MSIX_VECTORS 20
+
+#define NIC_PF_INTR_ID_ECC0_SBE 0
+#define NIC_PF_INTR_ID_ECC0_DBE 1
+#define NIC_PF_INTR_ID_ECC1_SBE 2
+#define NIC_PF_INTR_ID_ECC1_DBE 3
+#define NIC_PF_INTR_ID_ECC2_SBE 4
+#define NIC_PF_INTR_ID_ECC2_DBE 5
+#define NIC_PF_INTR_ID_ECC3_SBE 6
+#define NIC_PF_INTR_ID_ECC3_DBE 7
+#define NIC_PF_INTR_ID_MBOX0 8
+#define NIC_PF_INTR_ID_MBOX1 9
+
+/* Global timer for CQ timer thresh interrupts.
+ * Calculated for an SCLK of 700MHz; the value written should be 1/16th
+ * of what is expected.
+ *
+ * 1 tick per 0.05 usec gives a value of 2.2; the remaining ~10% is
+ * covered by the CQ timer thresh value.
+ */
+#define NICPF_CLK_PER_INT_TICK 2
+
+struct nicvf_cq_poll {
+ u8 cq_idx; /* Completion queue index */
+ struct napi_struct napi;
+};
+
+#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */
+#define NIC_MAX_RSS_HASH_BITS 8
+#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
+
+struct nicvf_rss_info {
+ bool enable;
+#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
+#define RSS_IP_HASH_ENA BIT(1)
+#define RSS_TCP_HASH_ENA BIT(2)
+#define RSS_TCP_SYN_DIS BIT(3)
+#define RSS_UDP_HASH_ENA BIT(4)
+#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
+#define RSS_ROCE_ENA BIT(6)
+#define RSS_L3_BI_DIRECTION_ENA BIT(7)
+#define RSS_L4_BI_DIRECTION_ENA BIT(8)
+ u64 cfg;
+ u8 hash_bits;
+ u16 rss_size;
+ u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+ u64 key[RSS_HASH_KEY_SIZE];
+} ____cacheline_aligned_in_smp;
+
+enum rx_stats_reg_offset {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_RED = 0x4,
+ RX_RED_OCTS = 0x5,
+ RX_ORUN = 0x6,
+ RX_ORUN_OCTS = 0x7,
+ RX_FCS = 0x8,
+ RX_L2ERR = 0x9,
+ RX_DRP_BCAST = 0xa,
+ RX_DRP_MCAST = 0xb,
+ RX_DRP_L3BCAST = 0xc,
+ RX_DRP_L3MCAST = 0xd,
+ RX_STATS_ENUM_LAST,
+};
+
+enum tx_stats_reg_offset {
+ TX_OCTS = 0x0,
+ TX_UCAST = 0x1,
+ TX_BCAST = 0x2,
+ TX_MCAST = 0x3,
+ TX_DROP = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+struct nicvf_hw_stats {
+ u64 rx_bytes_ok;
+ u64 rx_ucast_frames_ok;
+ u64 rx_bcast_frames_ok;
+ u64 rx_mcast_frames_ok;
+ u64 rx_fcs_errors;
+ u64 rx_l2_errors;
+ u64 rx_drop_red;
+ u64 rx_drop_red_bytes;
+ u64 rx_drop_overrun;
+ u64 rx_drop_overrun_bytes;
+ u64 rx_drop_bcast;
+ u64 rx_drop_mcast;
+ u64 rx_drop_l3_bcast;
+ u64 rx_drop_l3_mcast;
+ u64 tx_bytes_ok;
+ u64 tx_ucast_frames_ok;
+ u64 tx_bcast_frames_ok;
+ u64 tx_mcast_frames_ok;
+ u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+ /* Rx */
+ u64 rx_frames_ok;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_jumbo;
+ u64 rx_drops;
+ /* Tx */
+ u64 tx_frames_ok;
+ u64 tx_drops;
+ u64 tx_busy;
+ u64 tx_tso;
+};
+
+struct nicvf {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ u8 vf_id;
+ u8 node;
+ u8 tns_mode;
+ u16 mtu;
+ struct queue_set *qs;
+ void __iomem *reg_base;
+ bool link_up;
+ u8 duplex;
+ u32 speed;
+ struct page *rb_page;
+ u32 rb_page_offset;
+ bool rb_alloc_fail;
+ bool rb_work_scheduled;
+ struct delayed_work rbdr_work;
+ struct tasklet_struct rbdr_task;
+ struct tasklet_struct qs_err_task;
+ struct tasklet_struct cq_task;
+ struct nicvf_cq_poll *napi[8];
+ struct nicvf_rss_info rss_info;
+ u8 cpi_alg;
+ /* Interrupt coalescing settings */
+ u32 cq_coalesce_usecs;
+
+ u32 msg_enable;
+ struct nicvf_hw_stats stats;
+ struct nicvf_drv_stats drv_stats;
+ struct bgx_stats bgx_stats;
+ struct work_struct reset_task;
+
+ /* MSI-X */
+ bool msix_enabled;
+ u8 num_vec;
+ struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
+ char irq_name[NIC_VF_MSIX_VECTORS][20];
+ bool irq_allocated[NIC_VF_MSIX_VECTORS];
+
+ bool pf_ready_to_rcv_msg;
+ bool pf_acked;
+ bool pf_nacked;
+ bool bgx_stats_acked;
+} ____cacheline_aligned_in_smp;
+
+/* PF <--> VF Mailbox communication
+ * Two 64bit registers are shared between PF and VF, with a separate set
+ * for each VF. Writing to the trigger register (MBOX 1 on first-revision
+ * hardware, MBOX 0 on later revisions) signals end of message.
+ */
+
+/* PF <--> VF mailbox communication */
+#define NIC_PF_VF_MAILBOX_SIZE 2
+#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */
+
+/* Mailbox message types */
+#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
+#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
+#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
+#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
+#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
+#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
+#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */
+#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
+#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
+#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
+#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
+#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
+#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
+#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
+#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
+#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
+#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
+#define NIC_MBOX_MSG_CFG_DONE 0x12 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0x13 /* VF is being shutdown */
+
+struct nic_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 tns_mode;
+ u8 node_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+ u8 msg;
+ u8 num;
+ u64 cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 rq_num;
+ u64 cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 sq_num;
+ u64 cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rq_cnt;
+ u8 cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 hash_bits;
+ u8 tbl_len;
+ u8 tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
+ u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+struct bgx_stats_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rx;
+ u8 idx;
+ u64 stats;
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+ u8 msg;
+ u8 link_up;
+ u8 duplex;
+ u32 speed;
+};
+
+/* 128 bit shared memory between PF and each VF */
+union nic_mbx {
+ struct { u8 msg; } msg;
+ struct nic_cfg_msg nic_cfg;
+ struct qs_cfg_msg qs;
+ struct rq_cfg_msg rq;
+ struct sq_cfg_msg sq;
+ struct set_mac_msg mac;
+ struct set_frs_msg frs;
+ struct cpi_cfg_msg cpi_cfg;
+ struct rss_sz_msg rss_size;
+ struct rss_cfg_msg rss_cfg;
+ struct bgx_stats_msg bgx_stats;
+ struct bgx_link_status link_status;
+};
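+
+/* Usage sketch (illustrative): a VF asking the PF for its RSS indirection
+ * table size over the mailbox, using nicvf_send_msg_to_pf() declared
+ * below:
+ *
+ * union nic_mbx mbx = {};
+ *
+ * mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+ * nicvf_send_msg_to_pf(nic, &mbx);
+ */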
+
+#define NIC_NODE_ID_MASK 0x03
+#define NIC_NODE_ID_SHIFT 44
+
+static inline int nic_get_node_id(struct pci_dev *pdev)
+{
+ u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
+
+ return (addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK;
+}
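+
+/* Worked example (arbitrary address): for a BAR0 base of 0x100000000000,
+ * (0x100000000000 >> 44) & 0x03 = 1, i.e. the device sits on node 1.
+ */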
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues);
+int nicvf_open(struct net_device *netdev);
+int nicvf_stop(struct net_device *netdev);
+int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
+void nicvf_config_rss(struct nicvf *nic);
+void nicvf_set_rss_key(struct nicvf *nic);
+void nicvf_set_ethtool_ops(struct net_device *netdev);
+void nicvf_update_stats(struct nicvf *nic);
+void nicvf_update_lmac_stats(struct nicvf *nic);
+
+#endif /* NIC_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
new file mode 100644
index 000000000000..6e0c03169a55
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -0,0 +1,932 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nic"
+#define DRV_VERSION "1.0"
+
+struct nicpf {
+ struct pci_dev *pdev;
+ u8 rev_id;
+ u8 node;
+ unsigned int flags;
+ u8 num_vf_en; /* No of VF enabled */
+ bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
+ void __iomem *reg_base; /* Register start address */
+ struct pkind_cfg pkind;
+#define NIC_SET_VF_LMAC_MAP(bgx, lmac) ((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
+#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) (((map) >> 4) & 0xF)
+#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) ((map) & 0xF)
+ u8 vf_lmac_map[MAX_LMAC];
+ struct delayed_work dwork;
+ struct workqueue_struct *check_link;
+ u8 link[MAX_LMAC];
+ u8 duplex[MAX_LMAC];
+ u32 speed[MAX_LMAC];
+ u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
+ u16 rss_ind_tbl_size;
+ bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
+
+ /* MSI-X */
+ bool msix_enabled;
+ u8 num_vec;
+ struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS];
+ bool irq_allocated[NIC_PF_MSIX_VECTORS];
+};
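+
+/* Sketch (illustrative): the map byte packs the BGX index in bits 7:4 and
+ * the LMAC index in bits 3:0, so the macros above round-trip:
+ *
+ * u8 map = NIC_SET_VF_LMAC_MAP(1, 2); yields 0x12
+ * NIC_GET_BGX_FROM_VF_LMAC_MAP(map) yields 1
+ * NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) yields 2
+ */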
+
+/* Supported devices */
+static const struct pci_device_id nic_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nic_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operation which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
+{
+ writeq_relaxed(val, nic->reg_base + offset);
+}
+
+static u64 nic_reg_read(struct nicpf *nic, u64 offset)
+{
+ return readq_relaxed(nic->reg_base + offset);
+}
+
+/* PF -> VF mailbox communication APIs */
+static void nic_enable_mbx_intr(struct nicpf *nic)
+{
+ /* Enable mailbox interrupt for all 128 VFs */
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
+}
+
+static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
+{
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
+}
+
+static u64 nic_get_mbx_addr(int vf)
+{
+ return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
+}
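+
+/* Worked example: with NIC_VF_NUM_SHIFT = 21, VF1's mailbox lands at
+ * 0x20002030 + (1 << 21) = 0x20202030; each VF's mailbox window is thus
+ * 2MB apart.
+ */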
+
+/* Send a mailbox message to VF
+ * @vf: vf to which this message is to be sent
+ * @mbx: Message to be sent
+ */
+static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
+{
+ void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
+ u64 *msg = (u64 *)mbx;
+
+ /* In first revision HW, the mbox interrupt is triggered
+ * when the PF writes to MBOX(1); in later revisions, when
+ * the PF writes to MBOX(0)
+ */
+ if (nic->rev_id == 0) {
+ /* see the comment for nic_reg_write()/nic_reg_read()
+ * functions above
+ */
+ writeq_relaxed(msg[0], mbx_addr);
+ writeq_relaxed(msg[1], mbx_addr + 8);
+ } else {
+ writeq_relaxed(msg[1], mbx_addr + 8);
+ writeq_relaxed(msg[0], mbx_addr);
+ }
+}
+
+/* Responds to VF's READY message with the VF's
+ * ID, node, MAC address, etc.
+ * @vf: VF which sent READY message
+ */
+static void nic_mbx_send_ready(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ int bgx_idx, lmac;
+ const char *mac;
+
+ mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
+ mbx.nic_cfg.vf_id = vf;
+
+ mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+ if (mac)
+ ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+
+ mbx.nic_cfg.node_id = nic->node;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* ACKs VF's mailbox message
+ * @vf: VF to which ACK to be sent
+ */
+static void nic_mbx_send_ack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_ACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* NACKs VF's mailbox message to signal that the PF is not able to
+ * complete the requested action
+ * @vf: VF to which the NACK is to be sent
+ */
+static void nic_mbx_send_nack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_NACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Flush all in flight receive packets to memory and
+ * bring down an active RQ
+ */
+static int nic_rcv_queue_sw_sync(struct nicpf *nic)
+{
+ u16 timeout = 0xFFFF;
+
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
+ /* Wait till sync cycle is finished */
+ while (timeout) {
+ if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
+ break;
+ timeout--;
+ }
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
+ if (!timeout) {
+ dev_err(&nic->pdev->dev, "Receive queue software sync failed");
+ return 1;
+ }
+ return 0;
+}
+
+/* Get BGX Rx/Tx stats and respond to VF's request */
+static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
+{
+ int bgx_idx, lmac;
+ union nic_mbx mbx = {};
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+
+ mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+ mbx.bgx_stats.vf_id = bgx->vf_id;
+ mbx.bgx_stats.rx = bgx->rx;
+ mbx.bgx_stats.idx = bgx->idx;
+ if (bgx->rx)
+ mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
+ lmac, bgx->idx);
+ else
+ mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
+ lmac, bgx->idx);
+ nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
+}
+
+/* Update hardware min/max frame size */
+static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
+{
+ if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
+ dev_err(&nic->pdev->dev,
+ "Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
+ vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+ return 1;
+ }
+ new_frs += ETH_HLEN;
+ if (new_frs <= nic->pkind.maxlen)
+ return 0;
+
+ nic->pkind.maxlen = new_frs;
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+ return 0;
+}
+
+/* Set minimum transmit packet size */
+static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
+{
+ int lmac;
+ u64 lmac_cfg;
+
+ /* Max value that can be set is 60 */
+ if (size > 60)
+ size = 60;
+
+ for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
+ lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
+ lmac_cfg &= ~(0xF << 2);
+ lmac_cfg |= ((size / 4) << 2);
+ nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
+ }
+}
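+
+/* Worked example: the 4-bit field at bits 5:2 holds the pad size in
+ * 4-byte units, so size = 60 is written as 60 / 4 = 15 (0xF), the largest
+ * value the field can hold.
+ */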
+
+/* Check the number of LMACs present and set the VF-to-LMAC mapping.
+ * The mapping is used while initializing channels.
+ */
+static void nic_set_lmac_vf_mapping(struct nicpf *nic)
+{
+ unsigned int bgx_map = bgx_get_map(nic->node);
+ int bgx, next_bgx_lmac = 0;
+ int lmac, lmac_cnt = 0;
+ u64 lmac_credit;
+
+ nic->num_vf_en = 0;
+
+ for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
+ if (!(bgx_map & (1 << bgx)))
+ continue;
+ lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+ for (lmac = 0; lmac < lmac_cnt; lmac++)
+ nic->vf_lmac_map[next_bgx_lmac++] =
+ NIC_SET_VF_LMAC_MAP(bgx, lmac);
+ nic->num_vf_en += lmac_cnt;
+
+ /* Program LMAC credits */
+ lmac_credit = (1ull << 1); /* channel credit enable */
+ lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
+ /* 48KB BGX Tx buffer size, each unit is of size 16bytes */
+ lmac_credit |= (((((48 * 1024) / lmac_cnt) -
+ NIC_HW_MAX_FRS) / 16) << 12);
+ lmac = bgx * MAX_LMAC_PER_BGX;
+ for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
+ nic_reg_write(nic,
+ NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
+ lmac_credit);
+ }
+}
+
+#define BGX0_BLOCK 8
+#define BGX1_BLOCK 9
+
+static void nic_init_hw(struct nicpf *nic)
+{
+ int i;
+
+ /* Reset NIC, in case the driver is repeatedly inserted and removed */
+ nic_reg_write(nic, NIC_PF_SOFT_RESET, 1);
+
+ /* Enable NIC HW block */
+ nic_reg_write(nic, NIC_PF_CFG, 0x3);
+
+ /* Enable backpressure */
+ nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
+
+ /* Disable TNS mode on both interfaces */
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
+ (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
+ (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
+ (1ULL << 63) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
+ (1ULL << 63) | BGX1_BLOCK);
+
+ /* PKIND configuration */
+ nic->pkind.minlen = 0;
+ nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+ nic->pkind.lenerr_en = 1;
+ nic->pkind.rx_hdr = 0;
+ nic->pkind.hdr_sl = 0;
+
+ for (i = 0; i < NIC_MAX_PKIND; i++)
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
+ *(u64 *)&nic->pkind);
+
+ nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
+
+ /* Timer config */
+ nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+}
+
+/* Channel parse index configuration */
+static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
+{
+ u32 vnic, bgx, lmac, chan;
+ u32 padd, cpi_count = 0;
+ u64 cpi_base, cpi, rssi_base, rssi;
+ u8 qset, rq_idx = 0;
+
+ vnic = cfg->vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+
+ chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+ cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
+ rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);
+
+ /* Rx channel configuration */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
+ (1ull << 63) | (vnic << 0));
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
+ ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
+
+ if (cfg->cpi_alg == CPI_ALG_NONE)
+ cpi_count = 1;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
+ cpi_count = 8;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
+ cpi_count = 16;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
+ cpi_count = NIC_MAX_CPI_PER_LMAC;
+
+ /* RSS Qset, Qidx mapping */
+ qset = cfg->vf_id;
+ rssi = rssi_base;
+ for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
+ nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+ (qset << 3) | rq_idx);
+ rq_idx++;
+ }
+
+ rssi = 0;
+ cpi = cpi_base;
+ for (; cpi < (cpi_base + cpi_count); cpi++) {
+ /* Determine port to channel adder */
+ if (cfg->cpi_alg != CPI_ALG_DIFF)
+ padd = cpi % cpi_count;
+ else
+ padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
+
+ /* Leave RSS_SIZE as '0' to disable RSS */
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (padd << 16) | (rssi_base + rssi));
+
+ if ((rssi + 1) >= cfg->rq_cnt)
+ continue;
+
+ if (cfg->cpi_alg == CPI_ALG_VLAN)
+ rssi++;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16)
+ rssi = ((cpi - cpi_base) & 0xe) >> 1;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF)
+ rssi = ((cpi - cpi_base) & 0x38) >> 3;
+ }
+ nic->cpi_base[cfg->vf_id] = cpi_base;
+}
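+
+/* Worked example (illustrative, TNS bypass): for VF0 on BGX0/LMAC0,
+ * chan = 0 and cpi_base = 0. With CPI_ALG_VLAN, cpi_count = 8, so the
+ * eight VLAN PCP values map to CPIs 0..7 while rssi advances by one per
+ * CPI until rq_cnt receive queues are covered.
+ */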
+
+/* Responds to VF with its RSS indirection table size */
+static void nic_send_rss_size(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Receive side scaling configuration. Configures:
+ * - the RSS index
+ * - the indirection table, i.e. the hash-to-RQ mapping
+ * - the number of hash bits to consider
+ */
+static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
+{
+ u8 qset, idx = 0;
+ u64 cpi_cfg, cpi_base, rssi_base, rssi;
+
+ cpi_base = nic->cpi_base[cfg->vf_id];
+ cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3));
+ rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset;
+
+ rssi = rssi_base;
+ qset = cfg->vf_id;
+
+ for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
+ nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+ (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
+ idx++;
+ }
+
+ cpi_cfg &= ~(0xFULL << 20);
+ cpi_cfg |= (cfg->hash_bits << 20);
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg);
+}
+
+/* 4 level transmit side scheduler configuration
+ * for TNS bypass mode
+ *
+ * Sample configuration for SQ0
+ * VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0
+ * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0
+ * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0
+ * VNIC3-SQ0 -> TL4(24) -> TL3[6] -> TL2[1] -> TL1[0] -> BGX0
+ * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
+ * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
+ */
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
+{
+ u32 bgx, lmac, chan;
+ u32 tl2, tl3, tl4;
+ u32 rr_quantum;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ /* 24 bytes for FCS, IPG and preamble */
+ rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
+
+ tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ tl4 += sq_idx;
+ tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
+ nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
+ ((u64)vnic << NIC_QS_ID_SHIFT) |
+ ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
+ nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
+ ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
+
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
+ chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+ /* Enable backpressure on the channel */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
+
+ tl2 = tl3 >> 2;
+ nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
+ nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
+ /* No priorities as of now */
+ nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+}
+
+/* Interrupt handler to handle mailbox messages from VFs */
+static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ u64 *mbx_data;
+ u64 mbx_addr;
+ u64 reg_addr;
+ int bgx, lmac;
+ int i;
+ int ret = 0;
+
+ nic->mbx_lock[vf] = true;
+
+ mbx_addr = nic_get_mbx_addr(vf);
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nic_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
+ __func__, mbx.msg.msg, vf);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic_mbx_send_ready(nic, vf);
+ nic->link[vf] = 0;
+ nic->duplex[vf] = 0;
+ nic->speed[vf] = 0;
+ ret = 1;
+ break;
+ case NIC_MBOX_MSG_QS_CFG:
+ reg_addr = NIC_PF_QSET_0_127_CFG |
+ (mbx.qs.num << NIC_QS_ID_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.qs.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_BP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_SW_SYNC:
+ ret = nic_rcv_queue_sw_sync(nic);
+ break;
+ case NIC_MBOX_MSG_RQ_DROP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_SQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
+ (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.sq.cfg);
+ nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num);
+ break;
+ case NIC_MBOX_MSG_SET_MAC:
+ lmac = mbx.mac.vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
+ break;
+ case NIC_MBOX_MSG_SET_MAX_FRS:
+ ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
+ mbx.frs.vf_id);
+ break;
+ case NIC_MBOX_MSG_CPI_CFG:
+ nic_config_cpi(nic, &mbx.cpi_cfg);
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic_send_rss_size(nic, vf);
+ goto unlock;
+ case NIC_MBOX_MSG_RSS_CFG:
+ case NIC_MBOX_MSG_RSS_CFG_CONT:
+ nic_config_rss(nic, &mbx.rss_cfg);
+ break;
+ case NIC_MBOX_MSG_CFG_DONE:
+ /* Last message of VF config msg sequence */
+ nic->vf_enabled[vf] = true;
+ goto unlock;
+ case NIC_MBOX_MSG_SHUTDOWN:
+ /* First msg in VF teardown sequence */
+ nic->vf_enabled[vf] = false;
+ break;
+ case NIC_MBOX_MSG_BGX_STATS:
+ nic_get_bgx_stats(nic, &mbx.bgx_stats);
+ goto unlock;
+ default:
+ dev_err(&nic->pdev->dev,
+ "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
+ break;
+ }
+
+ if (!ret)
+ nic_mbx_send_ack(nic, vf);
+ else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+ nic_mbx_send_nack(nic, vf);
+unlock:
+ nic->mbx_lock[vf] = false;
+}
+
+static void nic_mbx_intr_handler(struct nicpf *nic, int mbx)
+{
+ u64 intr;
+ u8 vf, vf_per_mbx_reg = 64;
+
+ intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
+ dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
+ for (vf = 0; vf < vf_per_mbx_reg; vf++) {
+ if (intr & (1ULL << vf)) {
+ dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
+ vf + (mbx * vf_per_mbx_reg));
+ if ((vf + (mbx * vf_per_mbx_reg)) >= nic->num_vf_en)
+ break;
+ nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
+ nic_clear_mbx_intr(nic, vf, mbx);
+ }
+ }
+}
+
+static irqreturn_t nic_mbx0_intr_handler(int irq, void *nic_irq)
+{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+
+ nic_mbx_intr_handler(nic, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nic_mbx1_intr_handler(int irq, void *nic_irq)
+{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+
+ nic_mbx_intr_handler(nic, 1);
+
+ return IRQ_HANDLED;
+}
+
+static int nic_enable_msix(struct nicpf *nic)
+{
+ int i, ret;
+
+ nic->num_vec = NIC_PF_MSIX_VECTORS;
+
+ for (i = 0; i < nic->num_vec; i++)
+ nic->msix_entries[i].entry = i;
+
+ ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+ if (ret) {
+ dev_err(&nic->pdev->dev,
+ "Request for #%d msix vectors failed\n",
+ nic->num_vec);
+ return ret;
+ }
+
+ nic->msix_enabled = 1;
+ return 0;
+}
+
+static void nic_disable_msix(struct nicpf *nic)
+{
+ if (nic->msix_enabled) {
+ pci_disable_msix(nic->pdev);
+ nic->msix_enabled = 0;
+ nic->num_vec = 0;
+ }
+}
+
+static void nic_free_all_interrupts(struct nicpf *nic)
+{
+ int irq;
+
+ for (irq = 0; irq < nic->num_vec; irq++) {
+ if (nic->irq_allocated[irq])
+ free_irq(nic->msix_entries[irq].vector, nic);
+ nic->irq_allocated[irq] = false;
+ }
+}
+
+static int nic_register_interrupts(struct nicpf *nic)
+{
+ int ret;
+
+ /* Enable MSI-X */
+ ret = nic_enable_msix(nic);
+ if (ret)
+ return ret;
+
+ /* Register mailbox interrupt handlers */
+ ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
+ nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
+ if (ret)
+ goto fail;
+
+ nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;
+
+ ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
+ nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
+ if (ret)
+ goto fail;
+
+ nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;
+
+ /* Enable mailbox interrupt */
+ nic_enable_mbx_intr(nic);
+ return 0;
+
+fail:
+ dev_err(&nic->pdev->dev, "Request irq failed\n");
+ nic_free_all_interrupts(nic);
+ return ret;
+}
+
+static void nic_unregister_interrupts(struct nicpf *nic)
+{
+ nic_free_all_interrupts(nic);
+ nic_disable_msix(nic);
+}
+
+static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
+{
+ int pos = 0;
+ int err;
+ u16 total_vf_cnt;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
+ return -ENODEV;
+ }
+
+ pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
+ if (total_vf_cnt < nic->num_vf_en)
+ nic->num_vf_en = total_vf_cnt;
+
+ if (!total_vf_cnt)
+ return 0;
+
+ err = pci_enable_sriov(pdev, nic->num_vf_en);
+ if (err) {
+ dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
+ nic->num_vf_en);
+ nic->num_vf_en = 0;
+ return err;
+ }
+
+ dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
+ nic->num_vf_en);
+
+ nic->flags |= NIC_SRIOV_ENABLED;
+ return 0;
+}
+
+/* Poll for BGX LMAC link status and update the corresponding VF if there
+ * is a change. Valid only if the internal L2 switch is not present;
+ * otherwise the VF link is always treated as up.
+ */
+static void nic_poll_for_link(struct work_struct *work)
+{
+ union nic_mbx mbx = {};
+ struct nicpf *nic;
+ struct bgx_link_status link;
+ u8 vf, bgx, lmac;
+
+ nic = container_of(work, struct nicpf, dwork.work);
+
+ mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+ for (vf = 0; vf < nic->num_vf_en; vf++) {
+ /* Poll only if VF is UP */
+ if (!nic->vf_enabled[vf])
+ continue;
+
+ /* Get BGX, LMAC indices for the VF */
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ /* Get interface link status */
+ bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+ /* Inform VF only if link status changed */
+ if (nic->link[vf] == link.link_up)
+ continue;
+
+ if (!nic->mbx_lock[vf]) {
+ nic->link[vf] = link.link_up;
+ nic->duplex[vf] = link.duplex;
+ nic->speed[vf] = link.speed;
+
+ /* Send a mbox message to VF with current link status */
+ mbx.link_status.link_up = link.link_up;
+ mbx.link_status.duplex = link.duplex;
+ mbx.link_status.speed = link.speed;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+ }
+ }
+ queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
+}
+
+static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct nicpf *nic;
+ int err;
+
+ BUILD_BUG_ON(sizeof(union nic_mbx) > 16);
+
+ nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
+ if (!nic)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, nic);
+
+ nic->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+ goto err_release_regions;
+ }
+
+ /* MAP PF's configuration registers */
+ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!nic->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id);
+
+ nic->node = nic_get_node_id(pdev);
+
+ nic_set_lmac_vf_mapping(nic);
+
+ /* Initialize hardware */
+ nic_init_hw(nic);
+
+ /* Set RSS TBL size for each VF */
+ nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+
+ /* Register interrupts */
+ err = nic_register_interrupts(nic);
+ if (err)
+ goto err_release_regions;
+
+ /* Configure SRIOV */
+ err = nic_sriov_init(pdev, nic);
+ if (err)
+ goto err_unregister_interrupts;
+
+ /* Register a physical link status poll fn() */
+ nic->check_link = alloc_workqueue("check_link_status",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!nic->check_link) {
+ err = -ENOMEM;
+ goto err_disable_sriov;
+ }
+
+ INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
+ queue_delayed_work(nic->check_link, &nic->dwork, 0);
+
+ return 0;
+
+err_disable_sriov:
+ if (nic->flags & NIC_SRIOV_ENABLED)
+ pci_disable_sriov(pdev);
+err_unregister_interrupts:
+ nic_unregister_interrupts(nic);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void nic_remove(struct pci_dev *pdev)
+{
+ struct nicpf *nic = pci_get_drvdata(pdev);
+
+ if (nic->flags & NIC_SRIOV_ENABLED)
+ pci_disable_sriov(pdev);
+
+ if (nic->check_link) {
+ /* Destroy work queue */
+ cancel_delayed_work(&nic->dwork);
+ flush_workqueue(nic->check_link);
+ destroy_workqueue(nic->check_link);
+ }
+
+ nic_unregister_interrupts(nic);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver nic_driver = {
+ .name = DRV_NAME,
+ .id_table = nic_id_table,
+ .probe = nic_probe,
+ .remove = nic_remove,
+};
+
+static int __init nic_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&nic_driver);
+}
+
+static void __exit nic_cleanup_module(void)
+{
+ pci_unregister_driver(&nic_driver);
+}
+
+module_init(nic_init_module);
+module_exit(nic_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
new file mode 100644
index 000000000000..58197bb2f805
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_REG_H
+#define NIC_REG_H
+
+#define NIC_PF_REG_COUNT 29573
+#define NIC_VF_REG_COUNT 249
+
+/* Physical function register offsets */
+#define NIC_PF_CFG (0x0000)
+#define NIC_PF_STATUS (0x0010)
+#define NIC_PF_INTR_TIMER_CFG (0x0030)
+#define NIC_PF_BIST_STATUS (0x0040)
+#define NIC_PF_SOFT_RESET (0x0050)
+#define NIC_PF_TCP_TIMER (0x0060)
+#define NIC_PF_BP_CFG (0x0080)
+#define NIC_PF_RRM_CFG (0x0088)
+#define NIC_PF_CQM_CF (0x00A0)
+#define NIC_PF_CNM_CF (0x00A8)
+#define NIC_PF_CNM_STATUS (0x00B0)
+#define NIC_PF_CQ_AVG_CFG (0x00C0)
+#define NIC_PF_RRM_AVG_CFG (0x00C8)
+#define NIC_PF_INTF_0_1_SEND_CFG (0x0200)
+#define NIC_PF_INTF_0_1_BP_CFG (0x0208)
+#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210)
+#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220)
+#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240)
+#define NIC_PF_MAILBOX_INT (0x0410)
+#define NIC_PF_MAILBOX_INT_W1S (0x0430)
+#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
+#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
+#define NIC_PF_RX_ETYPE_0_7 (0x0500)
+#define NIC_PF_PKIND_0_15_CFG (0x0600)
+#define NIC_PF_ECC0_FLIP0 (0x1000)
+#define NIC_PF_ECC1_FLIP0 (0x1008)
+#define NIC_PF_ECC2_FLIP0 (0x1010)
+#define NIC_PF_ECC3_FLIP0 (0x1018)
+#define NIC_PF_ECC0_FLIP1 (0x1080)
+#define NIC_PF_ECC1_FLIP1 (0x1088)
+#define NIC_PF_ECC2_FLIP1 (0x1090)
+#define NIC_PF_ECC3_FLIP1 (0x1098)
+#define NIC_PF_ECC0_CDIS (0x1100)
+#define NIC_PF_ECC1_CDIS (0x1108)
+#define NIC_PF_ECC2_CDIS (0x1110)
+#define NIC_PF_ECC3_CDIS (0x1118)
+#define NIC_PF_BIST0_STATUS (0x1280)
+#define NIC_PF_BIST1_STATUS (0x1288)
+#define NIC_PF_BIST2_STATUS (0x1290)
+#define NIC_PF_BIST3_STATUS (0x1298)
+#define NIC_PF_ECC0_SBE_INT (0x2000)
+#define NIC_PF_ECC0_SBE_INT_W1S (0x2008)
+#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010)
+#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018)
+#define NIC_PF_ECC0_DBE_INT (0x2100)
+#define NIC_PF_ECC0_DBE_INT_W1S (0x2108)
+#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110)
+#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118)
+#define NIC_PF_ECC1_SBE_INT (0x2200)
+#define NIC_PF_ECC1_SBE_INT_W1S (0x2208)
+#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210)
+#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218)
+#define NIC_PF_ECC1_DBE_INT (0x2300)
+#define NIC_PF_ECC1_DBE_INT_W1S (0x2308)
+#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310)
+#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318)
+#define NIC_PF_ECC2_SBE_INT (0x2400)
+#define NIC_PF_ECC2_SBE_INT_W1S (0x2408)
+#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410)
+#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418)
+#define NIC_PF_ECC2_DBE_INT (0x2500)
+#define NIC_PF_ECC2_DBE_INT_W1S (0x2508)
+#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510)
+#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518)
+#define NIC_PF_ECC3_SBE_INT (0x2600)
+#define NIC_PF_ECC3_SBE_INT_W1S (0x2608)
+#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610)
+#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618)
+#define NIC_PF_ECC3_DBE_INT (0x2700)
+#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
+#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
+#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_CPI_0_2047_CFG (0x200000)
+#define NIC_PF_RSSI_0_4097_RQ (0x220000)
+#define NIC_PF_LMAC_0_7_CFG (0x240000)
+#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
+#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
+#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
+#define NIC_PF_CHAN_0_255_RX_CFG (0x420000)
+#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000)
+#define NIC_PF_CHAN_0_255_CREDIT (0x460000)
+#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000)
+#define NIC_PF_SW_SYNC_RX (0x490000)
+#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
+#define NIC_PF_TL2_0_63_CFG (0x500000)
+#define NIC_PF_TL2_0_63_PRI (0x520000)
+#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
+#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
+#define NIC_PF_TL3_0_255_CFG (0x600000)
+#define NIC_PF_TL3_0_255_CHAN (0x620000)
+#define NIC_PF_TL3_0_255_PIR (0x640000)
+#define NIC_PF_TL3_0_255_SW_XOFF (0x660000)
+#define NIC_PF_TL3_0_255_CNM_RATE (0x680000)
+#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000)
+#define NIC_PF_TL4A_0_255_CFG (0x6F0000)
+#define NIC_PF_TL4_0_1023_CFG (0x800000)
+#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000)
+#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000)
+#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000)
+#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000)
+#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030)
+#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000)
+#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100)
+#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000)
+#define NIC_PF_QSET_0_127_CFG (0x20010000)
+#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400)
+#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420)
+#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500)
+#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08)
+#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00)
+
+#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000)
+#define NIC_PF_MSIX_VEC_0_CTL (0x000008)
+#define NIC_PF_MSIX_PBA_0 (0x0F0000)
+
+/* Virtual function register offsets */
+#define NIC_VNIC_CFG (0x000020)
+#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
+#define NIC_VF_INT (0x000200)
+#define NIC_VF_INT_W1S (0x000220)
+#define NIC_VF_ENA_W1C (0x000240)
+#define NIC_VF_ENA_W1S (0x000260)
+
+#define NIC_VNIC_RSS_CFG (0x0020E0)
+#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
+#define NIC_VNIC_TX_STAT_0_4 (0x004000)
+#define NIC_VNIC_RX_STAT_0_13 (0x004100)
+#define NIC_QSET_RQ_GEN_CFG (0x010010)
+
+#define NIC_QSET_CQ_0_7_CFG (0x010400)
+#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
+#define NIC_QSET_CQ_0_7_THRESH (0x010410)
+#define NIC_QSET_CQ_0_7_BASE (0x010420)
+#define NIC_QSET_CQ_0_7_HEAD (0x010428)
+#define NIC_QSET_CQ_0_7_TAIL (0x010430)
+#define NIC_QSET_CQ_0_7_DOOR (0x010438)
+#define NIC_QSET_CQ_0_7_STATUS (0x010440)
+#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
+#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
+
+#define NIC_QSET_RQ_0_7_CFG (0x010600)
+#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700)
+
+#define NIC_QSET_SQ_0_7_CFG (0x010800)
+#define NIC_QSET_SQ_0_7_THRESH (0x010810)
+#define NIC_QSET_SQ_0_7_BASE (0x010820)
+#define NIC_QSET_SQ_0_7_HEAD (0x010828)
+#define NIC_QSET_SQ_0_7_TAIL (0x010830)
+#define NIC_QSET_SQ_0_7_DOOR (0x010838)
+#define NIC_QSET_SQ_0_7_STATUS (0x010840)
+#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
+#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
+#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
+
+#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
+#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
+#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
+#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
+#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
+#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
+#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
+#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
+#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50)
+
+#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000)
+#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008)
+#define NIC_VF_MSIX_PBA (0x0F0000)
+
+/* Offsets within registers */
+#define NIC_MSIX_VEC_SHIFT 4
+#define NIC_Q_NUM_SHIFT 18
+#define NIC_QS_ID_SHIFT 21
+#define NIC_VF_NUM_SHIFT 21
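+
+/* How these shifts are used (a sketch): the VF queue register accessors
+ * in nicvf_main.c select a queue's copy of a Qset register with
+ * (offset + (qidx << NIC_Q_NUM_SHIFT)); NIC_VF_NUM_SHIFT is assumed to
+ * select a VF's copy of a PF register the same way.
+ */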
+
+/* Port kind configuration register */
+struct pkind_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63:22;
+ u64 hdr_sl:5; /* Header skip length */
+ u64 rx_hdr:3; /* TNS Receive header present */
+ u64 lenerr_en:1;/* L2 length error check enable */
+ u64 reserved_32_32:1;
+ u64 maxlen:16; /* Max frame size */
+ u64 minlen:16; /* Min frame size */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 minlen:16;
+ u64 maxlen:16;
+ u64 reserved_32_32:1;
+ u64 lenerr_en:1;
+ u64 rx_hdr:3;
+ u64 hdr_sl:5;
+ u64 reserved_42_63:22;
+#endif
+};
+
+#endif /* NIC_REG_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
new file mode 100644
index 000000000000..16bd2d772db9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+/* ETHTOOL Support for VNIC_VF Device */
+
+#include <linux/pci.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nicvf"
+#define DRV_VERSION "1.0"
+
+struct nicvf_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+#define NICVF_HW_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
+}
+
+#define NICVF_DRV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
+}
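+
+/* The .index computed above is the u64 slot of the field within its
+ * stats structure, so nicvf_get_ethtool_stats() can read any stat
+ * generically as ((u64 *)&stats)[index].
+ */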
+
+static const struct nicvf_stat nicvf_hw_stats[] = {
+ NICVF_HW_STAT(rx_bytes_ok),
+ NICVF_HW_STAT(rx_ucast_frames_ok),
+ NICVF_HW_STAT(rx_bcast_frames_ok),
+ NICVF_HW_STAT(rx_mcast_frames_ok),
+ NICVF_HW_STAT(rx_fcs_errors),
+ NICVF_HW_STAT(rx_l2_errors),
+ NICVF_HW_STAT(rx_drop_red),
+ NICVF_HW_STAT(rx_drop_red_bytes),
+ NICVF_HW_STAT(rx_drop_overrun),
+ NICVF_HW_STAT(rx_drop_overrun_bytes),
+ NICVF_HW_STAT(rx_drop_bcast),
+ NICVF_HW_STAT(rx_drop_mcast),
+ NICVF_HW_STAT(rx_drop_l3_bcast),
+ NICVF_HW_STAT(rx_drop_l3_mcast),
+ NICVF_HW_STAT(tx_bytes_ok),
+ NICVF_HW_STAT(tx_ucast_frames_ok),
+ NICVF_HW_STAT(tx_bcast_frames_ok),
+ NICVF_HW_STAT(tx_mcast_frames_ok),
+};
+
+static const struct nicvf_stat nicvf_drv_stats[] = {
+ NICVF_DRV_STAT(rx_frames_ok),
+ NICVF_DRV_STAT(rx_frames_64),
+ NICVF_DRV_STAT(rx_frames_127),
+ NICVF_DRV_STAT(rx_frames_255),
+ NICVF_DRV_STAT(rx_frames_511),
+ NICVF_DRV_STAT(rx_frames_1023),
+ NICVF_DRV_STAT(rx_frames_1518),
+ NICVF_DRV_STAT(rx_frames_jumbo),
+ NICVF_DRV_STAT(rx_drops),
+ NICVF_DRV_STAT(tx_frames_ok),
+ NICVF_DRV_STAT(tx_busy),
+ NICVF_DRV_STAT(tx_tso),
+ NICVF_DRV_STAT(tx_drops),
+};
+
+static const struct nicvf_stat nicvf_queue_stats[] = {
+ { "bytes", 0 },
+ { "frames", 1 },
+};
+
+static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
+static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
+static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
+
+static int nicvf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ cmd->supported = 0;
+ cmd->transceiver = XCVR_EXTERNAL;
+ if (nic->speed <= 1000) {
+ cmd->port = PORT_MII;
+ cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->port = PORT_FIBRE;
+ cmd->autoneg = AUTONEG_DISABLE;
+ }
+ cmd->duplex = nic->duplex;
+ ethtool_cmd_speed_set(cmd, nic->speed);
+
+ return 0;
+}
+
+static void nicvf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+}
+
+static u32 nicvf_get_msglevel(struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ return nic->msg_enable;
+}
+
+static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ nic->msg_enable = lvl;
+}
+
+static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ int stats, qidx;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
+ memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
+ memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(data, "rxq%d: %s", qidx,
+ nicvf_queue_stats[stats].name);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(data, "txq%d: %s", qidx,
+ nicvf_queue_stats[stats].name);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "bgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "bgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static int nicvf_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ return nicvf_n_hw_stats + nicvf_n_drv_stats +
+ (nicvf_n_queue_stats *
+ (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) +
+ BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
+}
+
+static void nicvf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int stat, qidx;
+
+ nicvf_update_stats(nic);
+
+ /* Update LMAC stats */
+ nicvf_update_lmac_stats(nic);
+
+ for (stat = 0; stat < nicvf_n_hw_stats; stat++)
+ *(data++) = ((u64 *)&nic->stats)
+ [nicvf_hw_stats[stat].index];
+ for (stat = 0; stat < nicvf_n_drv_stats; stat++)
+ *(data++) = ((u64 *)&nic->drv_stats)
+ [nicvf_drv_stats[stat].index];
+
+ for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+
+ for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+
+ for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
+ *(data++) = nic->bgx_stats.rx_stats[stat];
+ for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
+ *(data++) = nic->bgx_stats.tx_stats[stat];
+}
+
+static int nicvf_get_regs_len(struct net_device *dev)
+{
+ return sizeof(u64) * NIC_VF_REG_COUNT;
+}
+
+static void nicvf_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *reg)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ u64 *p = (u64 *)reg;
+ u64 reg_offset;
+ int mbox, key, stat, q;
+ int i = 0;
+
+ regs->version = 0;
+	memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64));
+
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
+ /* Mailbox registers */
+ for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));
+
+ p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+ for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
+
+ /* Tx/Rx statistics */
+ for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VNIC_TX_STAT_0_4 | (stat << 3));
+
+	for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
+		p[i++] = nicvf_reg_read(nic,
+					NIC_VNIC_RX_STAT_0_13 | (stat << 3));
+
+ p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
+
+	/* All completion queues' registers */
+ for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
+ }
+
+	/* All receive queues' registers */
+ for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RQ_0_7_STAT_0_1, q);
+ reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+
+ for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
+ reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+
+ for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS0, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS1, q);
+ reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+}
+
+static int nicvf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
+ return 0;
+}
+
+static void nicvf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+
+ ring->rx_max_pending = MAX_RCV_BUF_COUNT;
+ ring->rx_pending = qs->rbdr_len;
+ ring->tx_max_pending = MAX_SND_QUEUE_LEN;
+ ring->tx_pending = qs->sq_len;
+}
+
+static int nicvf_get_rss_hash_opts(struct nicvf *nic,
+ struct ethtool_rxnfc *info)
+{
+ info->data = 0;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
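+		/* Fall through - L4 flow types also hash on IP src/dst */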
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nicvf_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rules)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = nic->qs->rq_cnt;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ return nicvf_get_rss_hash_opts(nic, info);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int nicvf_set_rss_hash_opts(struct nicvf *nic,
+ struct ethtool_rxnfc *info)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+	if (!rss->enable) {
+		netdev_err(nic->netdev,
+			   "RSS is disabled, hash cannot be set\n");
+		return -EIO;
+	}
+
+ netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+ info->flow_type, info->data);
+
+ if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_UDP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_UDP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_L4ETC);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ rss_cfg = RSS_HASH_IP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
+ return 0;
+}
+
+static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return nicvf_set_rss_hash_opts(nic, info);
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
+{
+ return RSS_HASH_KEY_SIZE * sizeof(u64);
+}
+
+static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ return nic->rss_info.rss_size;
+}
+
+static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
+ u8 *hfunc)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss->ind_tbl[idx];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, u8 hfunc)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
+ rss->enable = false;
+ rss->hash_bits = 0;
+ return -EIO;
+ }
+
+ /* We do not allow change in unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ rss->enable = true;
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = indir[idx];
+ }
+
+ if (hkey) {
+ memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
+ nicvf_set_rss_key(nic);
+ }
+
+ nicvf_config_rss(nic);
+ return 0;
+}
+
+/* Get the number of queues the device supports and the current queue count */
+static void nicvf_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ memset(channel, 0, sizeof(*channel));
+
+ channel->max_rx = MAX_RCV_QUEUES_PER_QS;
+ channel->max_tx = MAX_SND_QUEUES_PER_QS;
+
+ channel->rx_count = nic->qs->rq_cnt;
+ channel->tx_count = nic->qs->sq_cnt;
+}
+
+/* Set the number of Tx and Rx queues to be used */
+static int nicvf_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ int err = 0;
+
+ if (!channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+ if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
+ return -EINVAL;
+ if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
+ return -EINVAL;
+
+ nic->qs->rq_cnt = channel->rx_count;
+ nic->qs->sq_cnt = channel->tx_count;
+ nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
+
+ err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
+ if (err)
+ return err;
+
+ if (!netif_running(dev))
+ return err;
+
+ nicvf_stop(dev);
+ nicvf_open(dev);
+	netdev_info(dev, "Set num Tx rings to %d, Rx rings to %d\n",
+ nic->qs->sq_cnt, nic->qs->rq_cnt);
+
+ return err;
+}
+
+static const struct ethtool_ops nicvf_ethtool_ops = {
+ .get_settings = nicvf_get_settings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = nicvf_get_drvinfo,
+ .get_msglevel = nicvf_get_msglevel,
+ .set_msglevel = nicvf_set_msglevel,
+ .get_strings = nicvf_get_strings,
+ .get_sset_count = nicvf_get_sset_count,
+ .get_ethtool_stats = nicvf_get_ethtool_stats,
+ .get_regs_len = nicvf_get_regs_len,
+ .get_regs = nicvf_get_regs,
+ .get_coalesce = nicvf_get_coalesce,
+ .get_ringparam = nicvf_get_ringparam,
+ .get_rxnfc = nicvf_get_rxnfc,
+ .set_rxnfc = nicvf_set_rxnfc,
+ .get_rxfh_key_size = nicvf_get_rxfh_key_size,
+ .get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
+ .get_rxfh = nicvf_get_rxfh,
+ .set_rxfh = nicvf_set_rxfh,
+ .get_channels = nicvf_get_channels,
+ .set_channels = nicvf_set_channels,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+void nicvf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &nicvf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
new file mode 100644
index 000000000000..02da802d3288
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -0,0 +1,1331 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/log2.h>
+#include <linux/prefetch.h>
+#include <linux/irq.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nicvf"
+#define DRV_VERSION "1.0"
+
+/* Supported devices */
+static const struct pci_device_id nicvf_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nicvf_id_table);
+
+static int debug = 0x00;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug message level bitmap");
+
+static int cpi_alg = CPI_ALG_NONE;
+module_param(cpi_alg, int, S_IRUGO);
+MODULE_PARM_DESC(cpi_alg,
+		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+
+static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
+ struct sk_buff *skb)
+{
+ if (skb->len <= 64)
+ nic->drv_stats.rx_frames_64++;
+ else if (skb->len <= 127)
+ nic->drv_stats.rx_frames_127++;
+ else if (skb->len <= 255)
+ nic->drv_stats.rx_frames_255++;
+ else if (skb->len <= 511)
+ nic->drv_stats.rx_frames_511++;
+ else if (skb->len <= 1023)
+ nic->drv_stats.rx_frames_1023++;
+ else if (skb->len <= 1518)
+ nic->drv_stats.rx_frames_1518++;
+ else
+ nic->drv_stats.rx_frames_jumbo++;
+}
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operations, which in this case are redundant and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
+{
+ writeq_relaxed(val, nic->reg_base + offset);
+}
+
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
+{
+ return readq_relaxed(nic->reg_base + offset);
+}
+
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val)
+{
+ void __iomem *addr = nic->reg_base + offset;
+
+ writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
+{
+ void __iomem *addr = nic->reg_base + offset;
+
+ return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+/* VF -> PF mailbox communication */
+
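+/* The mailbox is a pair of 64-bit registers; a message occupies both
+ * words. Writing the second word (msg[1]) is assumed to be what raises
+ * the mailbox interrupt on the PF side, hence it is written last.
+ */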
+static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
+{
+ u64 *msg = (u64 *)mbx;
+
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
+}
+
+int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
+{
+ int timeout = NIC_MBOX_MSG_TIMEOUT;
+ int sleep = 10;
+
+ nic->pf_acked = false;
+ nic->pf_nacked = false;
+
+ nicvf_write_to_mbx(nic, mbx);
+
+	/* Wait for the message just sent to be acked, timeout 2sec */
+ while (!nic->pf_acked) {
+ if (nic->pf_nacked)
+ return -EINVAL;
+ msleep(sleep);
+ if (nic->pf_acked)
+ break;
+ timeout -= sleep;
+ if (!timeout) {
+			netdev_err(nic->netdev,
+				   "PF didn't ack mbox msg %d from VF%d\n",
+				   (mbx->msg.msg & 0xFF), nic->vf_id);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+/* Checks if the VF is able to communicate with the PF and also gets
+ * the VNIC number this VF is associated to.
+ * Returns 1 on success, 0 on failure.
+ */
+static int nicvf_check_pf_ready(struct nicvf *nic)
+{
+ int timeout = 5000, sleep = 20;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_READY;
+
+ nic->pf_ready_to_rcv_msg = false;
+
+ nicvf_write_to_mbx(nic, &mbx);
+
+ while (!nic->pf_ready_to_rcv_msg) {
+ msleep(sleep);
+ if (nic->pf_ready_to_rcv_msg)
+ break;
+ timeout -= sleep;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to READY msg\n");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
+{
+ if (bgx->rx)
+ nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
+ else
+ nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
+}
+
+static void nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ u64 *mbx_data;
+ u64 mbx_addr;
+ int i;
+
+ mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nicvf_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic->pf_ready_to_rcv_msg = true;
+ nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+ nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+ nic->node = mbx.nic_cfg.node_id;
+ ether_addr_copy(nic->netdev->dev_addr, mbx.nic_cfg.mac_addr);
+ nic->link_up = false;
+ nic->duplex = 0;
+ nic->speed = 0;
+ break;
+ case NIC_MBOX_MSG_ACK:
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_NACK:
+ nic->pf_nacked = true;
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_STATS:
+ nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
+ nic->pf_acked = true;
+ nic->bgx_stats_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ nic->pf_acked = true;
+ nic->link_up = mbx.link_status.link_up;
+ nic->duplex = mbx.link_status.duplex;
+ nic->speed = mbx.link_status.speed;
+ if (nic->link_up) {
+ netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
+ nic->netdev->name, nic->speed,
+ nic->duplex == DUPLEX_FULL ?
+ "Full duplex" : "Half duplex");
+ netif_carrier_on(nic->netdev);
+ netif_tx_wake_all_queues(nic->netdev);
+ } else {
+ netdev_info(nic->netdev, "%s: Link is Down\n",
+ nic->netdev->name);
+ netif_carrier_off(nic->netdev);
+ netif_tx_stop_all_queues(nic->netdev);
+ }
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
+ break;
+ }
+ nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
+}
+
+static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
+{
+ union nic_mbx mbx = {};
+
+ mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
+ mbx.mac.vf_id = nic->vf_id;
+ ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_config_cpi(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
+ mbx.cpi_cfg.vf_id = nic->vf_id;
+ mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+ mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_rss_size(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.vf_id = nic->vf_id;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+void nicvf_config_rss(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int ind_tbl_len = rss->rss_size;
+ int i, nextq = 0;
+
+ mbx.rss_cfg.vf_id = nic->vf_id;
+ mbx.rss_cfg.hash_bits = rss->hash_bits;
+ while (ind_tbl_len) {
+ mbx.rss_cfg.tbl_offset = nextq;
+ mbx.rss_cfg.tbl_len = min(ind_tbl_len,
+ RSS_IND_TBL_LEN_PER_MBX_MSG);
+ mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
+ NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
+
+ for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
+ mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ ind_tbl_len -= mbx.rss_cfg.tbl_len;
+ }
+}
+
+void nicvf_set_rss_key(struct nicvf *nic)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
+ int idx;
+
+ for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+ nicvf_reg_write(nic, key_addr, rss->key[idx]);
+ key_addr += sizeof(u64);
+ }
+}
+
+static int nicvf_rss_init(struct nicvf *nic)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ nicvf_get_rss_size(nic);
+
+ if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
+ rss->enable = false;
+ rss->hash_bits = 0;
+ return 0;
+ }
+
+ rss->enable = true;
+
+ /* Using the HW reset value for now */
+ rss->key[0] = 0xFEED0BADFEED0BADULL;
+ rss->key[1] = 0xFEED0BADFEED0BADULL;
+ rss->key[2] = 0xFEED0BADFEED0BADULL;
+ rss->key[3] = 0xFEED0BADFEED0BADULL;
+ rss->key[4] = 0xFEED0BADFEED0BADULL;
+
+ nicvf_set_rss_key(nic);
+
+ rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
+
+ rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));
+
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
+ nic->qs->rq_cnt);
+ nicvf_config_rss(nic);
+ return 1;
+}
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues)
+{
+ int err = 0;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set no of Tx queues: %d\n", tx_queues);
+ return err;
+ }
+
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev,
+ "Failed to set no of Rx queues: %d\n", rx_queues);
+ return err;
+}
+
+static int nicvf_init_resources(struct nicvf *nic)
+{
+ int err;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+
+ /* Enable Qset */
+ nicvf_qset_config(nic, true);
+
+ /* Initialize queues and HW for data transfer */
+ err = nicvf_config_data_transfer(nic, true);
+ if (err) {
+ netdev_err(nic->netdev,
+ "Failed to alloc/config VF's QSet resources\n");
+ return err;
+ }
+
+ /* Send VF config done msg to PF */
+ nicvf_write_to_mbx(nic, &mbx);
+
+ return 0;
+}
+
+static void nicvf_snd_pkt_handler(struct net_device *netdev,
+ struct cmp_queue *cq,
+ struct cqe_send_t *cqe_tx, int cqe_type)
+{
+ struct sk_buff *skb = NULL;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct snd_queue *sq;
+ struct sq_hdr_subdesc *hdr;
+
+ sq = &nic->qs->sq[cqe_tx->sq_idx];
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
+ return;
+
+ netdev_dbg(nic->netdev,
+ "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
+ __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
+ cqe_tx->sqe_ptr, hdr->subdesc_cnt);
+
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+ skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
+	/* For TSO offloaded packets, only the head SKB needs to be freed */
+ if (skb) {
+ prefetch(skb);
+ dev_consume_skb_any(skb);
+ }
+}
+
+static void nicvf_rcv_pkt_handler(struct net_device *netdev,
+ struct napi_struct *napi,
+ struct cmp_queue *cq,
+ struct cqe_rx_t *cqe_rx, int cqe_type)
+{
+ struct sk_buff *skb;
+ struct nicvf *nic = netdev_priv(netdev);
+ int err = 0;
+
+ /* Check for errors */
+ err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
+ if (err && !cqe_rx->rb_cnt)
+ return;
+
+ skb = nicvf_get_rcv_skb(nic, cqe_rx);
+ if (!skb) {
+ netdev_dbg(nic->netdev, "Packet not received\n");
+ return;
+ }
+
+ if (netif_msg_pktdata(nic)) {
+ netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
+ skb, skb->len);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb->len, true);
+ }
+
+ nicvf_set_rx_frame_cnt(nic, skb);
+
+ skb_record_rx_queue(skb, cqe_rx->rq_idx);
+ if (netdev->hw_features & NETIF_F_RXCSUM) {
+ /* HW by default verifies TCP/UDP/SCTP checksums */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (napi && (netdev->features & NETIF_F_GRO))
+ napi_gro_receive(napi, skb);
+ else
+ netif_receive_skb(skb);
+}
+
+static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
+ struct napi_struct *napi, int budget)
+{
+ int processed_cqe, work_done = 0;
+ int cqe_count, cqe_head;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct cmp_queue *cq = &qs->cq[cq_idx];
+ struct cqe_rx_t *cq_desc;
+
+ spin_lock_bh(&cq->lock);
+loop:
+ processed_cqe = 0;
+	/* Get the number of valid CQ entries to process */
+ cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
+ cqe_count &= CQ_CQE_COUNT;
+ if (!cqe_count)
+ goto done;
+
+ /* Get head of the valid CQ entries */
+ cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
+ cqe_head &= 0xFFFF;
+
+ netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n",
+ __func__, cqe_count, cqe_head);
+ while (processed_cqe < cqe_count) {
+ /* Get the CQ descriptor */
+ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
+ cqe_head++;
+ cqe_head &= (cq->dmem.q_len - 1);
+ /* Initiate prefetch for next descriptor */
+ prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
+
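+		/* Rx packets are processed within the NAPI budget; send
+		 * completions are drained regardless so that Tx descriptors
+		 * keep getting reclaimed (see the budget check below).
+		 */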
+ if ((work_done >= budget) && napi &&
+ (cq_desc->cqe_type != CQE_TYPE_SEND)) {
+ break;
+ }
+
+ netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n",
+ cq_desc->cqe_type);
+ switch (cq_desc->cqe_type) {
+ case CQE_TYPE_RX:
+ nicvf_rcv_pkt_handler(netdev, napi, cq,
+ cq_desc, CQE_TYPE_RX);
+ work_done++;
+ break;
+ case CQE_TYPE_SEND:
+ nicvf_snd_pkt_handler(netdev, cq,
+ (void *)cq_desc, CQE_TYPE_SEND);
+ break;
+ case CQE_TYPE_INVALID:
+ case CQE_TYPE_RX_SPLIT:
+ case CQE_TYPE_RX_TCP:
+ case CQE_TYPE_SEND_PTP:
+ /* Ignore for now */
+ break;
+ }
+ processed_cqe++;
+ }
+ netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n",
+ __func__, processed_cqe, work_done, budget);
+
+ /* Ring doorbell to inform H/W to reuse processed CQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
+ cq_idx, processed_cqe);
+
+ if ((work_done < budget) && napi)
+ goto loop;
+
+done:
+ spin_unlock_bh(&cq->lock);
+ return work_done;
+}
+
+static int nicvf_poll(struct napi_struct *napi, int budget)
+{
+ u64 cq_head;
+ int work_done = 0;
+ struct net_device *netdev = napi->dev;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf_cq_poll *cq;
+ struct netdev_queue *txq;
+
+ cq = container_of(napi, struct nicvf_cq_poll, napi);
+ work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
+
+ txq = netdev_get_tx_queue(netdev, cq->cq_idx);
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
+
+ if (work_done < budget) {
+ /* Slow packet rate, exit polling */
+ napi_complete(napi);
+ /* Re-enable interrupts */
+ cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
+ cq->cq_idx);
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
+ cq->cq_idx, cq_head);
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+ }
+ return work_done;
+}
+
+/* Qset error interrupt handler
+ *
+ * As of now only CQ errors are handled
+ */
+static void nicvf_handle_qs_err(unsigned long data)
+{
+ struct nicvf *nic = (struct nicvf *)data;
+ struct queue_set *qs = nic->qs;
+ int qidx;
+ u64 status;
+
+ netif_tx_disable(nic->netdev);
+
+ /* Check if it is CQ err */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
+ qidx);
+ if (!(status & CQ_ERR_MASK))
+ continue;
+ /* Process already queued CQEs and reconfig CQ */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ nicvf_sq_disable(nic, qidx);
+ nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
+ nicvf_cmp_queue_config(nic, qs, qidx, true);
+ nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
+ nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
+
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+ }
+
+ netif_tx_start_all_queues(nic->netdev);
+ /* Re-enable Qset error interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+}
+
+static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
+{
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
+ u64 intr;
+
+ intr = nicvf_reg_read(nic, NIC_VF_INT);
+ /* Check for spurious interrupt */
+ if (!(intr & NICVF_INTR_MBOX_MASK))
+ return IRQ_HANDLED;
+
+ nicvf_handle_mbx_intr(nic);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
+{
+ u64 qidx, intr, clear_intr = 0;
+ u64 cq_intr, rbdr_intr, qs_err_intr;
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+
+ intr = nicvf_reg_read(nic, NIC_VF_INT);
+ if (netif_msg_intr(nic))
+ netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
+ nic->netdev->name, intr);
+
+ qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
+ if (qs_err_intr) {
+ /* Disable Qset err interrupt and schedule softirq */
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ tasklet_hi_schedule(&nic->qs_err_task);
+ clear_intr |= qs_err_intr;
+ }
+
+ /* Disable interrupts and start polling */
+ cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ if (!(cq_intr & (1 << qidx)))
+ continue;
+ if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
+ continue;
+
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
+
+ cq_poll = nic->napi[qidx];
+ /* Schedule NAPI */
+ if (cq_poll)
+ napi_schedule(&cq_poll->napi);
+ }
+
+ /* Handle RBDR interrupts */
+ rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
+ if (rbdr_intr) {
+ /* Disable RBDR interrupt and schedule softirq */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
+ continue;
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ tasklet_hi_schedule(&nic->rbdr_task);
+ clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
+ }
+ }
+
+ /* Clear interrupts */
+ nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
+ return IRQ_HANDLED;
+}
+
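+/* Note: unlike the usual 0/-errno convention, this returns 1 on
+ * success and 0 on failure; callers test it accordingly.
+ */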
+static int nicvf_enable_msix(struct nicvf *nic)
+{
+ int ret, vec;
+
+ nic->num_vec = NIC_VF_MSIX_VECTORS;
+
+ for (vec = 0; vec < nic->num_vec; vec++)
+ nic->msix_entries[vec].entry = vec;
+
+ ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+ if (ret) {
+ netdev_err(nic->netdev,
+ "Req for #%d msix vectors failed\n", nic->num_vec);
+ return 0;
+ }
+ nic->msix_enabled = 1;
+ return 1;
+}
+
+static void nicvf_disable_msix(struct nicvf *nic)
+{
+ if (nic->msix_enabled) {
+ pci_disable_msix(nic->pdev);
+ nic->msix_enabled = 0;
+ nic->num_vec = 0;
+ }
+}
+
+static int nicvf_register_interrupts(struct nicvf *nic)
+{
+ int irq, free, ret = 0;
+ int vector;
+
+ for_each_cq_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
+ nic->vf_id, irq);
+
+ for_each_sq_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
+ nic->vf_id, irq - NICVF_INTR_ID_SQ);
+
+ for_each_rbdr_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
+ nic->vf_id, irq - NICVF_INTR_ID_RBDR);
+
+ /* Register all interrupts except mailbox */
+ for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
+ vector = nic->msix_entries[irq].vector;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (ret)
+ break;
+ nic->irq_allocated[irq] = true;
+ }
+
+	for (irq = NICVF_INTR_ID_SQ; !ret && irq < NICVF_INTR_ID_MISC; irq++) {
+ vector = nic->msix_entries[irq].vector;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (ret)
+ break;
+ nic->irq_allocated[irq] = true;
+ }
+
+ sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
+ "NICVF%d Qset error", nic->vf_id);
+ if (!ret) {
+ vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
+ irq = NICVF_INTR_ID_QS_ERR;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (!ret)
+ nic->irq_allocated[irq] = true;
+ }
+
+ if (ret) {
+ netdev_err(nic->netdev, "Request irq failed\n");
+		for (free = 0; free < nic->num_vec; free++) {
+			if (!nic->irq_allocated[free])
+				continue;
+			free_irq(nic->msix_entries[free].vector, nic);
+			nic->irq_allocated[free] = false;
+		}
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nicvf_unregister_interrupts(struct nicvf *nic)
+{
+ int irq;
+
+ /* Free registered interrupts */
+ for (irq = 0; irq < nic->num_vec; irq++) {
+ if (nic->irq_allocated[irq])
+ free_irq(nic->msix_entries[irq].vector, nic);
+ nic->irq_allocated[irq] = false;
+ }
+
+ /* Disable MSI-X */
+ nicvf_disable_msix(nic);
+}
+
+/* Initialize MSI-X vectors and register the MISC interrupt.
+ * Send a READY message to the PF to check if it is alive.
+ */
+static int nicvf_register_misc_interrupt(struct nicvf *nic)
+{
+ int ret = 0;
+ int irq = NICVF_INTR_ID_MISC;
+
+ /* Return if mailbox interrupt is already registered */
+ if (nic->msix_enabled)
+ return 0;
+
+ /* Enable MSI-X */
+ if (!nicvf_enable_msix(nic))
+ return 1;
+
+ sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
+ /* Register Misc interrupt */
+ ret = request_irq(nic->msix_entries[irq].vector,
+ nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
+
+ if (ret)
+ return ret;
+ nic->irq_allocated[irq] = true;
+
+ /* Enable mailbox interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
+
+ /* Check if VF is able to communicate with PF */
+ if (!nicvf_check_pf_ready(nic)) {
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+ nicvf_unregister_interrupts(nic);
+ return 1;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int qid = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
+
+ /* Check for minimum packet length */
+ if (skb->len <= ETH_HLEN) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
+ nic->drv_stats.tx_busy++;
+ if (netif_msg_tx_err(nic))
+ netdev_warn(netdev,
+ "%s: Transmit ring full, stopping SQ%d\n",
+ netdev->name, qid);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+int nicvf_stop(struct net_device *netdev)
+{
+ int irq, qidx;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ /* Disable RBDR & QS error interrupts */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
+ }
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+ /* Wait for pending IRQ handlers to finish */
+ for (irq = 0; irq < nic->num_vec; irq++)
+ synchronize_irq(nic->msix_entries[irq].vector);
+
+ tasklet_kill(&nic->rbdr_task);
+ tasklet_kill(&nic->qs_err_task);
+ if (nic->rb_work_scheduled)
+ cancel_delayed_work_sync(&nic->rbdr_work);
+
+ for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ nic->napi[qidx] = NULL;
+ napi_synchronize(&cq_poll->napi);
+		/* The CQ interrupt is re-enabled after napi_complete() in
+		 * nicvf_poll(), so disable it now
+		 */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ kfree(cq_poll);
+ }
+
+ /* Free resources */
+ nicvf_config_data_transfer(nic, false);
+
+ /* Disable HW Qset */
+ nicvf_qset_config(nic, false);
+
+ /* disable mailbox interrupt */
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+
+ nicvf_unregister_interrupts(nic);
+
+ return 0;
+}
+
+int nicvf_open(struct net_device *netdev)
+{
+ int err, qidx;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+
+ nic->mtu = netdev->mtu;
+
+ netif_carrier_off(netdev);
+
+ err = nicvf_register_misc_interrupt(nic);
+ if (err)
+ return err;
+
+ /* Register NAPI handler for processing CQEs */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
+ if (!cq_poll) {
+ err = -ENOMEM;
+ goto napi_del;
+ }
+ cq_poll->cq_idx = qidx;
+ netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&cq_poll->napi);
+ nic->napi[qidx] = cq_poll;
+ }
+
+	/* Check if we got a MAC address from the PF, else generate a random one */
+ if (is_zero_ether_addr(netdev->dev_addr)) {
+ eth_hw_addr_random(netdev);
+ nicvf_hw_set_mac_addr(nic, netdev);
+ }
+
+ /* Init tasklet for handling Qset err interrupt */
+ tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
+ (unsigned long)nic);
+
+ /* Init RBDR tasklet which will refill RBDR */
+ tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
+ (unsigned long)nic);
+ INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
+
+	/* Configure CPI algorithm */
+ nic->cpi_alg = cpi_alg;
+ nicvf_config_cpi(nic);
+
+ /* Configure receive side scaling */
+ nicvf_rss_init(nic);
+
+ err = nicvf_register_interrupts(nic);
+ if (err)
+ goto cleanup;
+
+ /* Initialize the queues */
+ err = nicvf_init_resources(nic);
+ if (err)
+ goto cleanup;
+
+ /* Make sure queue initialization is written */
+ wmb();
+
+ nicvf_reg_write(nic, NIC_VF_INT, -1);
+ /* Enable Qset err interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+ /* Enable completion queue interrupt */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+
+ /* Enable RBDR threshold interrupt */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
+
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+cleanup:
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+ nicvf_unregister_interrupts(nic);
+napi_del:
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ kfree(cq_poll);
+ nic->napi[qidx] = NULL;
+ }
+ return err;
+}
+
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+ union nic_mbx mbx = {};
+
+ mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+ mbx.frs.max_frs = mtu;
+ mbx.frs.vf_id = nic->vf_id;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (new_mtu > NIC_HW_MAX_FRS)
+ return -EINVAL;
+
+ if (new_mtu < NIC_HW_MIN_FRS)
+ return -EINVAL;
+
+ if (nicvf_update_hw_max_frs(nic, new_mtu))
+ return -EINVAL;
+ netdev->mtu = new_mtu;
+ nic->mtu = new_mtu;
+
+ return 0;
+}
+
+static int nicvf_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ if (nic->msix_enabled)
+ if (nicvf_hw_set_mac_addr(nic, netdev))
+ return -EBUSY;
+
+ return 0;
+}
+
+void nicvf_update_lmac_stats(struct nicvf *nic)
+{
+ int stat = 0;
+ union nic_mbx mbx = {};
+ int timeout;
+
+ if (!netif_running(nic->netdev))
+ return;
+
+ mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+ mbx.bgx_stats.vf_id = nic->vf_id;
+ /* Rx stats */
+ mbx.bgx_stats.rx = 1;
+ while (stat < BGX_RX_STATS_COUNT) {
+ nic->bgx_stats_acked = 0;
+ mbx.bgx_stats.idx = stat;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ timeout = 0;
+ while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+ msleep(2);
+ timeout++;
+ }
+ stat++;
+ }
+
+ stat = 0;
+
+ /* Tx stats */
+ mbx.bgx_stats.rx = 0;
+ while (stat < BGX_TX_STATS_COUNT) {
+ nic->bgx_stats_acked = 0;
+ mbx.bgx_stats.idx = stat;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ timeout = 0;
+ while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+ msleep(2);
+ timeout++;
+ }
+ stat++;
+ }
+}
+
+void nicvf_update_stats(struct nicvf *nic)
+{
+ int qidx;
+ struct nicvf_hw_stats *stats = &nic->stats;
+ struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+ struct queue_set *qs = nic->qs;
+
+#define GET_RX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
+#define GET_TX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
+
+ stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
+ stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
+ stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
+ stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
+ stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
+ stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
+ stats->rx_drop_red = GET_RX_STATS(RX_RED);
+ stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
+ stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
+ stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
+ stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
+ stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
+
+ stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
+ stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
+ stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
+ stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+ stats->tx_drops = GET_TX_STATS(TX_DROP);
+
+ drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
+ stats->rx_bcast_frames_ok +
+ stats->rx_mcast_frames_ok;
+ drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
+ stats->tx_bcast_frames_ok +
+ stats->tx_mcast_frames_ok;
+ drv_stats->rx_drops = stats->rx_drop_red +
+ stats->rx_drop_overrun;
+ drv_stats->tx_drops = stats->tx_drops;
+
+ /* Update RQ and SQ stats */
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_update_rq_stats(nic, qidx);
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_update_sq_stats(nic, qidx);
+}
+
+static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf_hw_stats *hw_stats = &nic->stats;
+ struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+
+ nicvf_update_stats(nic);
+
+ stats->rx_bytes = hw_stats->rx_bytes_ok;
+ stats->rx_packets = drv_stats->rx_frames_ok;
+ stats->rx_dropped = drv_stats->rx_drops;
+
+ stats->tx_bytes = hw_stats->tx_bytes_ok;
+ stats->tx_packets = drv_stats->tx_frames_ok;
+ stats->tx_dropped = drv_stats->tx_drops;
+
+ return stats;
+}
+
+static void nicvf_tx_timeout(struct net_device *dev)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ if (netif_msg_tx_err(nic))
+ netdev_warn(dev, "%s: Transmit timed out, resetting\n",
+ dev->name);
+
+ schedule_work(&nic->reset_task);
+}
+
+static void nicvf_reset_task(struct work_struct *work)
+{
+ struct nicvf *nic;
+
+ nic = container_of(work, struct nicvf, reset_task);
+
+ if (!netif_running(nic->netdev))
+ return;
+
+ nicvf_stop(nic->netdev);
+ nicvf_open(nic->netdev);
+ nic->netdev->trans_start = jiffies;
+}
+
+static const struct net_device_ops nicvf_netdev_ops = {
+ .ndo_open = nicvf_open,
+ .ndo_stop = nicvf_stop,
+ .ndo_start_xmit = nicvf_xmit,
+ .ndo_change_mtu = nicvf_change_mtu,
+ .ndo_set_mac_address = nicvf_set_mac_address,
+ .ndo_get_stats64 = nicvf_get_stats64,
+ .ndo_tx_timeout = nicvf_tx_timeout,
+};
+
+static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct nicvf *nic;
+ struct queue_set *qs;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+ goto err_release_regions;
+ }
+
+ netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
+ MAX_RCV_QUEUES_PER_QS,
+ MAX_SND_QUEUES_PER_QS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ nic = netdev_priv(netdev);
+ nic->netdev = netdev;
+ nic->pdev = pdev;
+
+ /* MAP VF's configuration registers */
+ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!nic->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = nicvf_set_qset_resources(nic);
+ if (err)
+ goto err_free_netdev;
+
+ qs = nic->qs;
+
+ err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
+ if (err)
+ goto err_free_netdev;
+
+ /* Check if PF is alive and get MAC address for this VF */
+ err = nicvf_register_misc_interrupt(nic);
+ if (err)
+ goto err_free_netdev;
+
+ netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO);
+ netdev->hw_features = netdev->features;
+
+ netdev->netdev_ops = &nicvf_netdev_ops;
+
+ INIT_WORK(&nic->reset_task, nicvf_reset_task);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_unregister_interrupts;
+ }
+
+ nic->msg_enable = debug;
+
+ nicvf_set_ethtool_ops(netdev);
+
+ return 0;
+
+err_unregister_interrupts:
+ nicvf_unregister_interrupts(nic);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void nicvf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nicvf *nic = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+ nicvf_unregister_interrupts(nic);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver nicvf_driver = {
+ .name = DRV_NAME,
+ .id_table = nicvf_id_table,
+ .probe = nicvf_probe,
+ .remove = nicvf_remove,
+};
+
+static int __init nicvf_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&nicvf_driver);
+}
+
+static void __exit nicvf_cleanup_module(void)
+{
+ pci_unregister_driver(&nicvf_driver);
+}
+
+module_init(nicvf_init_module);
+module_exit(nicvf_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
new file mode 100644
index 000000000000..d69d228d11a0
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -0,0 +1,1545 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include <net/tso.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "nicvf_queues.h"
+
+struct rbuf_info {
+ struct page *page;
+ void *data;
+ u64 offset;
+};
+
+#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
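+
+/* Buffer layout (a sketch, per nicvf_alloc_rcv_buffer() below): the
+ * fragment start is padded up to a cache line, struct rbuf_info sits in
+ * the first NICVF_RCV_BUF_ALIGN_BYTES of the aligned region, and the
+ * address just past those bytes is handed to HW, which is why
+ * GET_RBUF_INFO() steps back by NICVF_RCV_BUF_ALIGN_BYTES.
+ */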
+
+/* Poll a register for a specific value */
+static int nicvf_poll_reg(struct nicvf *nic, int qidx,
+ u64 reg, int bit_pos, int bits, int val)
+{
+ u64 bit_mask;
+ u64 reg_val;
+ int timeout = 10;
+
+ bit_mask = (1ULL << bits) - 1;
+ bit_mask = (bit_mask << bit_pos);
+
+ while (timeout) {
+ reg_val = nicvf_queue_reg_read(nic, reg, qidx);
+ if (((reg_val & bit_mask) >> bit_pos) == val)
+ return 0;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
+ return 1;
+}
+
+/* Allocate memory for a queue's descriptors */
+static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
+ int q_len, int desc_size, int align_bytes)
+{
+ dmem->q_len = q_len;
+ dmem->size = (desc_size * q_len) + align_bytes;
+ /* Save address, need it while freeing */
+ dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
+ &dmem->dma, GFP_KERNEL);
+ if (!dmem->unalign_base)
+ return -ENOMEM;
+
+ /* Align memory address for 'align_bytes' */
+ dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
+ dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
+ return 0;
+}
+
+/* Free queue's descriptor memory */
+static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
+{
+ if (!dmem)
+ return;
+
+ dma_free_coherent(&nic->pdev->dev, dmem->size,
+ dmem->unalign_base, dmem->dma);
+ dmem->unalign_base = NULL;
+ dmem->base = NULL;
+}
+
+/* Allocate a buffer for packet reception
+ * HW returns the memory address where the packet was DMA'ed, not a pointer
+ * into the RBDR ring, so save the buffer address at the start of the
+ * fragment and align the start address to a cache-line boundary.
+ */
+static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
+ u32 buf_len, u64 **rbuf)
+{
+ u64 data;
+ struct rbuf_info *rinfo;
+ int order = get_order(buf_len);
+
+ /* Check if the request can be accommodated in the previously allocated page */
+ if (nic->rb_page) {
+ if ((nic->rb_page_offset + buf_len + buf_len) >
+ (PAGE_SIZE << order)) {
+ nic->rb_page = NULL;
+ } else {
+ nic->rb_page_offset += buf_len;
+ get_page(nic->rb_page);
+ }
+ }
+
+ /* Allocate a new page */
+ if (!nic->rb_page) {
+ nic->rb_page = alloc_pages(gfp | __GFP_COMP, order);
+ if (!nic->rb_page) {
+ netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n");
+ return -ENOMEM;
+ }
+ nic->rb_page_offset = 0;
+ }
+
+ data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
+
+ /* Align buffer address to the cache line size, i.e. 128 bytes */
+ rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
+ /* Save page address for refcount updates */
+ rinfo->page = nic->rb_page;
+ /* Store start address for later retrieval */
+ rinfo->data = (void *)data;
+ /* Store alignment offset */
+ rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
+
+ data += rinfo->offset;
+
+ /* Give next aligned address to hw for DMA */
+ *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
+ return 0;
+}
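+/* Resulting fragment layout (derived from the code above):
+ *
+ *   data .. [pad to 128 B][struct rbuf_info + pad, 128 B][buffer for HW]
+ *
+ * The address handed to HW is rinfo + NICVF_RCV_BUF_ALIGN_BYTES, so the
+ * receive path can step back 128 bytes (GET_RBUF_INFO) to recover the
+ * page and the original start address.
+ */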
+
+/* Retrieve actual buffer start address and build skb for received packet */
+static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
+ u64 rb_ptr, int len)
+{
+ struct sk_buff *skb;
+ struct rbuf_info *rinfo;
+
+ rb_ptr = (u64)phys_to_virt(rb_ptr);
+ /* Get buffer start address and alignment offset */
+ rinfo = GET_RBUF_INFO(rb_ptr);
+
+ /* Now build an skb to give to stack */
+ skb = build_skb(rinfo->data, RCV_FRAG_LEN);
+ if (!skb) {
+ put_page(rinfo->page);
+ return NULL;
+ }
+
+ /* Set correct skb->data */
+ skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
+
+ prefetch((void *)rb_ptr);
+ return skb;
+}
+
+/* Allocate RBDR ring and populate receive buffers */
+static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
+ int ring_len, int buf_size)
+{
+ int idx;
+ u64 *rbuf;
+ struct rbdr_entry_t *desc;
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
+ sizeof(struct rbdr_entry_t),
+ NICVF_RCV_BUF_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ rbdr->desc = rbdr->dmem.base;
+ /* Buffer size has to be a multiple of 128 bytes */
+ rbdr->dma_size = buf_size;
+ rbdr->enable = true;
+ rbdr->thresh = RBDR_THRESH;
+
+ nic->rb_page = NULL;
+ for (idx = 0; idx < ring_len; idx++) {
+ err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
+ &rbuf);
+ if (err)
+ return err;
+
+ desc = GET_RBDR_DESC(rbdr, idx);
+ desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ }
+ return 0;
+}
+
+/* Free RBDR ring and its receive buffers */
+static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
+{
+ int head, tail;
+ u64 buf_addr;
+ struct rbdr_entry_t *desc;
+ struct rbuf_info *rinfo;
+
+ if (!rbdr)
+ return;
+
+ rbdr->enable = false;
+ if (!rbdr->dmem.base)
+ return;
+
+ head = rbdr->head;
+ tail = rbdr->tail;
+
+ /* Release receive buffer pages */
+ while (head != tail) {
+ desc = GET_RBDR_DESC(rbdr, head);
+ buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+ rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+ put_page(rinfo->page);
+ head++;
+ head &= (rbdr->dmem.q_len - 1);
+ }
+ /* Release the page of the tail desc */
+ desc = GET_RBDR_DESC(rbdr, tail);
+ buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+ rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+ put_page(rinfo->page);
+
+ /* Free RBDR ring */
+ nicvf_free_q_desc_mem(nic, &rbdr->dmem);
+}
+
+/* Refill receive buffer descriptors with new buffers */
+static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
+{
+ struct queue_set *qs = nic->qs;
+ int rbdr_idx = qs->rbdr_cnt;
+ int tail, qcount;
+ int refill_rb_cnt;
+ struct rbdr *rbdr;
+ struct rbdr_entry_t *desc;
+ u64 *rbuf;
+ int new_rb = 0;
+
+refill:
+ if (!rbdr_idx)
+ return;
+ rbdr_idx--;
+ rbdr = &qs->rbdr[rbdr_idx];
+ /* Check if it's enabled */
+ if (!rbdr->enable)
+ goto next_rbdr;
+
+ /* Get the number of descriptors to be refilled */
+ qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
+ qcount &= 0x7FFFF;
+ /* Doorbell can be rung with at most ring size minus 1 descriptors */
+ if (qcount >= (qs->rbdr_len - 1))
+ goto next_rbdr;
+ else
+ refill_rb_cnt = qs->rbdr_len - qcount - 1;
+
+ /* Start filling descs from tail */
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
+ while (refill_rb_cnt) {
+ tail++;
+ tail &= (rbdr->dmem.q_len - 1);
+
+ if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
+ break;
+
+ desc = GET_RBDR_DESC(rbdr, tail);
+ desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ refill_rb_cnt--;
+ new_rb++;
+ }
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Check if buffer allocation failed */
+ if (refill_rb_cnt)
+ nic->rb_alloc_fail = true;
+ else
+ nic->rb_alloc_fail = false;
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+ rbdr_idx, new_rb);
+next_rbdr:
+ /* Re-enable RBDR interrupts only if buffer allocation succeeded */
+ if (!nic->rb_alloc_fail && rbdr->enable)
+ nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
+
+ if (rbdr_idx)
+ goto refill;
+}
+
+/* Allocate receive buffers in non-atomic context for a better chance of success */
+void nicvf_rbdr_work(struct work_struct *work)
+{
+ struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
+
+ nicvf_refill_rbdr(nic, GFP_KERNEL);
+ if (nic->rb_alloc_fail)
+ schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+ else
+ nic->rb_work_scheduled = false;
+}
+
+/* In softirq context, allocate receive buffers in atomic mode */
+void nicvf_rbdr_task(unsigned long data)
+{
+ struct nicvf *nic = (struct nicvf *)data;
+
+ nicvf_refill_rbdr(nic, GFP_ATOMIC);
+ if (nic->rb_alloc_fail) {
+ nic->rb_work_scheduled = true;
+ schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+ }
+}
+
+/* Initialize completion queue */
+static int nicvf_init_cmp_queue(struct nicvf *nic,
+ struct cmp_queue *cq, int q_len)
+{
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
+ NICVF_CQ_BASE_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ cq->desc = cq->dmem.base;
+ cq->thresh = CMP_QUEUE_CQE_THRESH;
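+ /* CMP_QUEUE_TIMER_THRESH is 220 ticks (~10 usec per its comment in
+ * nicvf_queues.h); 220 * 0.05 - 1 = 10, and being a constant
+ * expression it is folded at compile time, so no floating point
+ * math is executed in the kernel.
+ */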
+ nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
+
+ return 0;
+}
+
+static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
+{
+ if (!cq)
+ return;
+ if (!cq->dmem.base)
+ return;
+
+ nicvf_free_q_desc_mem(nic, &cq->dmem);
+}
+
+/* Initialize transmit queue */
+static int nicvf_init_snd_queue(struct nicvf *nic,
+ struct snd_queue *sq, int q_len)
+{
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
+ NICVF_SQ_BASE_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ sq->desc = sq->dmem.base;
+ sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
+ if (!sq->skbuff)
+ return -ENOMEM;
+ sq->head = 0;
+ sq->tail = 0;
+ atomic_set(&sq->free_cnt, q_len - 1);
+ sq->thresh = SND_QUEUE_THRESH;
+
+ /* Preallocate memory for TSO segment's header */
+ sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
+ q_len * TSO_HEADER_SIZE,
+ &sq->tso_hdrs_phys, GFP_KERNEL);
+ if (!sq->tso_hdrs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
+{
+ if (!sq)
+ return;
+ if (!sq->dmem.base)
+ return;
+
+ if (sq->tso_hdrs)
+ /* Size must match the q_len * TSO_HEADER_SIZE allocation */
+ dma_free_coherent(&nic->pdev->dev,
+ sq->dmem.q_len * TSO_HEADER_SIZE,
+ sq->tso_hdrs, sq->tso_hdrs_phys);
+
+ kfree(sq->skbuff);
+ nicvf_free_q_desc_mem(nic, &sq->dmem);
+}
+
+static void nicvf_reclaim_snd_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+ /* Check if SQ is stopped */
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
+ return;
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+}
+
+static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ union nic_mbx mbx = {};
+
+ /* Make sure all packets in the pipeline are written back into mem */
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable timer threshold (doesn't get reset upon CQ reset) */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+ /* Disable completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+}
+
+static void nicvf_reclaim_rbdr(struct nicvf *nic,
+ struct rbdr *rbdr, int qidx)
+{
+ u64 tmp, fifo_state;
+ int timeout = 10;
+
+ /* Save head and tail pointers for freeing up buffers */
+ rbdr->head = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_HEAD,
+ qidx) >> 3;
+ rbdr->tail = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_TAIL,
+ qidx) >> 3;
+
+ /* If RBDR FIFO is in 'FAIL' state then do a reset first
+ * before reclaiming.
+ */
+ fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+ if (((fifo_state >> 62) & 0x03) == 0x3)
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ /* Disable RBDR */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
+ while (1) {
+ tmp = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
+ qidx);
+ if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "Failed polling on prefetch status\n");
+ return;
+ }
+ }
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+ return;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
+}
+
+/* Configures receive queue */
+static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct rcv_queue *rq;
+ struct rq_cfg rq_cfg;
+
+ rq = &qs->rq[qidx];
+ rq->enable = enable;
+
+ /* Disable receive queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+
+ if (!rq->enable) {
+ nicvf_reclaim_rcv_queue(nic, qs, qidx);
+ return;
+ }
+
+ rq->cq_qs = qs->vnic_id;
+ rq->cq_idx = qidx;
+ rq->start_rbdr_qs = qs->vnic_id;
+ rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
+ rq->cont_rbdr_qs = qs->vnic_id;
+ rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
+ /* All writes of RBDR data to be loaded into the L2 cache as well */
+ rq->caching = 1;
+
+ /* Send a mailbox msg to PF to config RQ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+ mbx.rq.qs_num = qs->vnic_id;
+ mbx.rq.rq_num = qidx;
+ mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
+ (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
+ (rq->cont_qs_rbdr_idx << 8) |
+ (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+ mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* RQ drop config
+ * Enable CQ drop to reserve sufficient CQEs for all tx packets
+ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+ mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
+
+ /* Enable Receive queue */
+ rq_cfg.ena = 1;
+ rq_cfg.tcp_ena = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
+}
+
+/* Configures completion queue */
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct cmp_queue *cq;
+ struct cq_cfg cq_cfg;
+
+ cq = &qs->cq[qidx];
+ cq->enable = enable;
+
+ if (!cq->enable) {
+ nicvf_reclaim_cmp_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+
+ spin_lock_init(&cq->lock);
+ /* Set completion queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
+ qidx, (u64)(cq->dmem.phys_base));
+
+ /* Enable Completion queue */
+ cq_cfg.ena = 1;
+ cq_cfg.reset = 0;
+ cq_cfg.caching = 0;
+ cq_cfg.qsize = CMP_QSIZE;
+ cq_cfg.avg_con = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
+ qidx, nic->cq_coalesce_usecs);
+}
+
+/* Configures transmit queue */
+static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct snd_queue *sq;
+ struct sq_cfg sq_cfg;
+
+ sq = &qs->sq[qidx];
+ sq->enable = enable;
+
+ if (!sq->enable) {
+ nicvf_reclaim_snd_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+
+ sq->cq_qs = qs->vnic_id;
+ sq->cq_idx = qidx;
+
+ /* Send a mailbox msg to PF to config SQ */
+ mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
+ mbx.sq.qs_num = qs->vnic_id;
+ mbx.sq.sq_num = qidx;
+ mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* Set queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
+ qidx, (u64)(sq->dmem.phys_base));
+
+ /* Enable send queue & set queue size */
+ sq_cfg.ena = 1;
+ sq_cfg.reset = 0;
+ sq_cfg.ldwb = 0;
+ sq_cfg.qsize = SND_QSIZE;
+ sq_cfg.tstmp_bgx_intf = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
+
+ /* Set queue:cpu affinity for better load distribution */
+ if (cpu_online(qidx)) {
+ cpumask_set_cpu(qidx, &sq->affinity_mask);
+ netif_set_xps_queue(nic->netdev,
+ &sq->affinity_mask, qidx);
+ }
+}
+
+/* Configures receive buffer descriptor ring */
+static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct rbdr *rbdr;
+ struct rbdr_cfg rbdr_cfg;
+
+ rbdr = &qs->rbdr[qidx];
+ nicvf_reclaim_rbdr(nic, rbdr, qidx);
+ if (!enable)
+ return;
+
+ /* Set descriptor base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
+ qidx, (u64)(rbdr->dmem.phys_base));
+
+ /* Enable RBDR & set queue size */
+ /* Buffer size should be a multiple of 128 bytes */
+ rbdr_cfg.ena = 1;
+ rbdr_cfg.reset = 0;
+ rbdr_cfg.ldwb = 0;
+ rbdr_cfg.qsize = RBDR_SIZE;
+ rbdr_cfg.avg_con = 0;
+ rbdr_cfg.lines = rbdr->dma_size / 128;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, *(u64 *)&rbdr_cfg);
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+ qidx, qs->rbdr_len - 1);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
+ qidx, rbdr->thresh - 1);
+}
+
+/* Requests PF to assign and enable Qset */
+void nicvf_qset_config(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct queue_set *qs = nic->qs;
+ struct qs_cfg *qs_cfg;
+
+ if (!qs) {
+ netdev_warn(nic->netdev,
+ "Qset is still not allocated, don't init queues\n");
+ return;
+ }
+
+ qs->enable = enable;
+ qs->vnic_id = nic->vf_id;
+
+ /* Send a mailbox msg to PF to config Qset */
+ mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
+ mbx.qs.num = qs->vnic_id;
+
+ mbx.qs.cfg = 0;
+ qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
+ if (qs->enable) {
+ qs_cfg->ena = 1;
+#ifdef __BIG_ENDIAN
+ qs_cfg->be = 1;
+#endif
+ qs_cfg->vnic = qs->vnic_id;
+ }
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_free_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Free receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
+
+ /* Free completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
+
+ /* Free send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_free_snd_queue(nic, &qs->sq[qidx]);
+}
+
+static int nicvf_alloc_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Alloc receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
+ DMA_BUFFER_LEN))
+ goto alloc_fail;
+ }
+
+ /* Alloc send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
+ if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
+ goto alloc_fail;
+ }
+
+ /* Alloc completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
+ goto alloc_fail;
+ }
+
+ return 0;
+alloc_fail:
+ nicvf_free_resources(nic);
+ return -ENOMEM;
+}
+
+int nicvf_set_qset_resources(struct nicvf *nic)
+{
+ struct queue_set *qs;
+
+ qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
+ if (!qs)
+ return -ENOMEM;
+ nic->qs = qs;
+
+ /* Set count of each queue */
+ qs->rbdr_cnt = RBDR_CNT;
+ qs->rq_cnt = RCV_QUEUE_CNT;
+ qs->sq_cnt = SND_QUEUE_CNT;
+ qs->cq_cnt = CMP_QUEUE_CNT;
+
+ /* Set queue lengths */
+ qs->rbdr_len = RCV_BUF_COUNT;
+ qs->sq_len = SND_QUEUE_LEN;
+ qs->cq_len = CMP_QUEUE_LEN;
+ return 0;
+}
+
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
+{
+ bool disable = false;
+ struct queue_set *qs = nic->qs;
+ int qidx;
+
+ if (!qs)
+ return 0;
+
+ if (enable) {
+ if (nicvf_alloc_resources(nic))
+ return -ENOMEM;
+
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, enable);
+ } else {
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, disable);
+
+ nicvf_free_resources(nic);
+ }
+
+ return 0;
+}
+
+/* Get free descriptors from the SQ
+ * Returns the index of the first reserved descriptor; callers turn it
+ * into a pointer with GET_SQ_DESC().
+ */
+static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ int qentry;
+
+ qentry = sq->tail;
+ atomic_sub(desc_cnt, &sq->free_cnt);
+ sq->tail += desc_cnt;
+ sq->tail &= (sq->dmem.q_len - 1);
+
+ return qentry;
+}
+
+/* Free descriptor back to SQ for future use */
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ atomic_add(desc_cnt, &sq->free_cnt);
+ sq->head += desc_cnt;
+ sq->head &= (sq->dmem.q_len - 1);
+}
+
+static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
+{
+ qentry++;
+ qentry &= (sq->dmem.q_len - 1);
+ return qentry;
+}
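+/* Ring-index arithmetic used throughout: q_len is a power of two, so
+ * 'idx & (q_len - 1)' wraps exactly like 'idx % q_len'. Example: with
+ * q_len = 16384 and tail = 16383, reserving two descriptors leaves
+ * tail = 16385 & 16383 = 1.
+ */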
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg |= NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+ /* Ring doorbell so that H/W restarts processing SQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+}
+
+void nicvf_sq_disable(struct nicvf *nic, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg &= ~NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+}
+
+void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
+ int qidx)
+{
+ u64 head, tail;
+ struct sk_buff *skb;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct sq_hdr_subdesc *hdr;
+
+ head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
+ while (sq->head != head) {
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
+ nicvf_put_sq_desc(sq, 1);
+ continue;
+ }
+ skb = (struct sk_buff *)sq->skbuff[sq->head];
+ atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
+ atomic64_add(hdr->tot_len,
+ (atomic64_t *)&netdev->stats.tx_bytes);
+ dev_kfree_skb_any(skb);
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ }
+}
+
+/* Calculate the number of SQ subdescriptors needed to transmit all
+ * segments of this TSO packet.
+ * Taken from 'Tilera network driver' with a minor modification.
+ */
+static int nicvf_tso_count_subdescs(struct sk_buff *skb)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->len - sh_len;
+ unsigned int p_len = sh->gso_size;
+ long f_id = -1; /* id of the current fragment */
+ long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
+ long f_used = 0; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ int num_edescs = 0;
+ int segment;
+
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+ unsigned int p_used = 0;
+
+ /* One edesc for header and for each piece of the payload. */
+ for (num_edescs++; p_used < p_len; num_edescs++) {
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = skb_frag_size(&sh->frags[f_id]);
+ f_used = 0;
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+ }
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ /* '+ gso_segs' for one SQ_HDR_SUBDESC per segment */
+ return num_edescs + sh->gso_segs;
+}
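+/* Example count (traced from the loop above): a TSO skb with
+ * gso_segs = 2 whose per-segment payload is contiguous costs, per
+ * segment, one edesc for the header plus one for the payload piece
+ * (4 total), and '+ gso_segs' adds one SQ_HDR subdesc per segment,
+ * giving 6 subdescriptors overall.
+ */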
+
+/* Get the number of SQ descriptors needed to xmit this skb */
+static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
+{
+ int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+
+ if (skb_shinfo(skb)->gso_size) {
+ subdesc_cnt = nicvf_tso_count_subdescs(skb);
+ return subdesc_cnt;
+ }
+
+ if (skb_shinfo(skb)->nr_frags)
+ subdesc_cnt += skb_shinfo(skb)->nr_frags;
+
+ return subdesc_cnt;
+}
+
+/* Add SQ HEADER subdescriptor.
+ * First subdescriptor for every send descriptor.
+ */
+static inline void
+nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+ int subdesc_cnt, struct sk_buff *skb, int len)
+{
+ int proto;
+ struct sq_hdr_subdesc *hdr;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ sq->skbuff[qentry] = (u64)skb;
+
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* No of subdescriptors following this */
+ hdr->subdesc_cnt = subdesc_cnt;
+ hdr->tot_len = len;
+
+ /* Offload checksum calculation to HW */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+
+ hdr->csum_l3 = 1; /* Enable IP csum calculation */
+ hdr->l3_offset = skb_network_offset(skb);
+ hdr->l4_offset = skb_transport_offset(skb);
+
+ proto = ip_hdr(skb)->protocol;
+ switch (proto) {
+ case IPPROTO_TCP:
+ hdr->csum_l4 = SEND_L4_CSUM_TCP;
+ break;
+ case IPPROTO_UDP:
+ hdr->csum_l4 = SEND_L4_CSUM_UDP;
+ break;
+ case IPPROTO_SCTP:
+ hdr->csum_l4 = SEND_L4_CSUM_SCTP;
+ break;
+ }
+ }
+}
+
+/* SQ GATHER subdescriptor
+ * Must follow HDR descriptor
+ */
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+ int size, u64 data)
+{
+ struct sq_gather_subdesc *gather;
+
+ qentry &= (sq->dmem.q_len - 1);
+ gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
+
+ memset(gather, 0, SND_QUEUE_DESC_SIZE);
+ gather->subdesc_type = SQ_DESC_TYPE_GATHER;
+ gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB;
+ gather->size = size;
+ gather->addr = data;
+}
+
+/* Segment a TSO packet into 'gso_size' segments and append
+ * them to SQ for transfer
+ */
+static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
+ int qentry, struct sk_buff *skb)
+{
+ struct tso_t tso;
+ int seg_subdescs = 0, desc_cnt = 0;
+ int seg_len, total_len, data_left;
+ int hdr_qentry = qentry;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ tso_start(skb, &tso);
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ /* Save Qentry for adding HDR_SUBDESC at the end */
+ hdr_qentry = qentry;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+
+ /* Add segment's header */
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
+ sq->tso_hdrs_phys +
+ qentry * TSO_HEADER_SIZE);
+ /* HDR_SUBDESC + GATHER */
+ seg_subdescs = 2;
+ seg_len = hdr_len;
+
+ /* Add segment's payload fragments */
+ while (data_left > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_left);
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_gather_subdesc(sq, qentry, size,
+ virt_to_phys(tso.data));
+ seg_subdescs++;
+ seg_len += size;
+
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
+ seg_subdescs - 1, skb, seg_len);
+ sq->skbuff[hdr_qentry] = 0;
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+
+ desc_cnt += seg_subdescs;
+ }
+ /* Save SKB in the last segment for freeing */
+ sq->skbuff[hdr_qentry] = (u64)skb;
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit all TSO segments */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ skb_get_queue_mapping(skb), desc_cnt);
+ return 1;
+}
+
+/* Append an skb to a SQ for packet transfer. */
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
+{
+ int i, size;
+ int subdesc_cnt;
+ int sq_num, qentry;
+ struct queue_set *qs = nic->qs;
+ struct snd_queue *sq;
+
+ sq_num = skb_get_queue_mapping(skb);
+ sq = &qs->sq[sq_num];
+
+ subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
+ if (subdesc_cnt > atomic_read(&sq->free_cnt))
+ goto append_fail;
+
+ qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+ /* Check if it's a TSO packet */
+ if (skb_shinfo(skb)->gso_size)
+ return nicvf_sq_append_tso(nic, sq, qentry, skb);
+
+ /* Add SQ header subdesc */
+ nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
+
+ /* Add SQ gather subdescs */
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+
+ /* Check for scattered buffer */
+ if (!skb_is_nonlinear(skb))
+ goto doorbell;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[i];
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ size = skb_frag_size(frag);
+ nicvf_sq_add_gather_subdesc(sq, qentry, size,
+ virt_to_phys(
+ skb_frag_address(frag)));
+ }
+
+doorbell:
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit new packet */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, subdesc_cnt);
+ return 1;
+
+append_fail:
+ netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
+ return 0;
+}
+
+static inline unsigned int frag_num(unsigned int i)
+{
+#ifdef __BIG_ENDIAN
+ return (i & ~3) + 3 - (i & 3);
+#else
+ return i;
+#endif
+}
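+/* frag_num() compensates for endianness when indexing the u16 rb_lens
+ * array that overlays 64-bit CQE words: on big-endian the four 16-bit
+ * sizes within each u64 sit in reverse order, so indices map
+ * 0->3, 1->2, 2->1, 3->0 (then 4->7, 5->6, ... in the next word);
+ * on little-endian it is the identity.
+ */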
+
+/* Returns SKB for a received packet */
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+{
+ int frag;
+ int payload_len = 0;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *skb_frag = NULL;
+ struct sk_buff *prev_frag = NULL;
+ u16 *rb_lens = NULL;
+ u64 *rb_ptrs = NULL;
+
+ rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
+ rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
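+ /* These offsets mirror struct cqe_rx_t in q_struct.h: words 3-5
+ * hold the rb0..rb11 sizes (viewed here as a u16 array) and the
+ * receive buffer pointers start at word 6 (rb0_ptr).
+ */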
+
+ netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
+ __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
+
+ for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
+ payload_len = rb_lens[frag_num(frag)];
+ if (!frag) {
+ /* First fragment */
+ skb = nicvf_rb_ptr_to_skb(nic,
+ *rb_ptrs - cqe_rx->align_pad,
+ payload_len);
+ if (!skb)
+ return NULL;
+ skb_reserve(skb, cqe_rx->align_pad);
+ skb_put(skb, payload_len);
+ } else {
+ /* Add fragments */
+ skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
+ payload_len);
+ if (!skb_frag) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ if (!skb_shinfo(skb)->frag_list)
+ skb_shinfo(skb)->frag_list = skb_frag;
+ else
+ prev_frag->next = skb_frag;
+
+ prev_frag = skb_frag;
+ skb->len += payload_len;
+ skb->data_len += payload_len;
+ skb_frag->len = payload_len;
+ }
+ /* Next buffer pointer */
+ rb_ptrs++;
+ }
+ return skb;
+}
+
+/* Enable interrupt */
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val;
+
+ reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to enable interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
+}
+
+/* Disable interrupt */
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val = 0;
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to disable interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
+}
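+/* A note on the register trio used here, inferred from the common
+ * W1S/W1C naming convention: NIC_VF_ENA_W1S is write-1-to-set and
+ * NIC_VF_ENA_W1C write-1-to-clear, so disabling writes only the bits
+ * to clear without a read-modify-write of unrelated enables, while
+ * NIC_VF_INT below acknowledges pending interrupt bits.
+ */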
+
+/* Clear interrupt */
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val = 0;
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to clear interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_INT, reg_val);
+}
+
+/* Check if interrupt is enabled */
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val;
+ u64 mask = 0xff;
+
+ reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ mask = NICVF_INTR_PKT_DROP_MASK;
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ mask = NICVF_INTR_TCP_TIMER_MASK;
+ break;
+ case NICVF_INTR_MBOX:
+ mask = NICVF_INTR_MBOX_MASK;
+ break;
+ case NICVF_INTR_QS_ERR:
+ mask = NICVF_INTR_QS_ERR_MASK;
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to check interrupt enable: unknown type\n");
+ break;
+ }
+
+ return (reg_val & mask);
+}
+
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
+{
+ struct rcv_queue *rq;
+
+#define GET_RQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
+ (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+ rq = &nic->qs->rq[rq_idx];
+ rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
+ rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
+{
+ struct snd_queue *sq;
+
+#define GET_SQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
+ (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+ sq = &nic->qs->sq[sq_idx];
+ sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
+ sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+/* Check for errors in the receive completion queue entry */
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
+{
+ struct cmp_queue_stats *stats = &cq->stats;
+
+ if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
+ stats->rx.errop.good++;
+ return 0;
+ }
+
+ if (netif_msg_rx_err(nic))
+ netdev_err(nic->netdev,
+ "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
+ nic->netdev->name,
+ cqe_rx->err_level, cqe_rx->err_opcode);
+
+ switch (cqe_rx->err_level) {
+ case CQ_ERRLVL_MAC:
+ stats->rx.errlvl.mac_errs++;
+ break;
+ case CQ_ERRLVL_L2:
+ stats->rx.errlvl.l2_errs++;
+ break;
+ case CQ_ERRLVL_L3:
+ stats->rx.errlvl.l3_errs++;
+ break;
+ case CQ_ERRLVL_L4:
+ stats->rx.errlvl.l4_errs++;
+ break;
+ }
+
+ switch (cqe_rx->err_opcode) {
+ case CQ_RX_ERROP_RE_PARTIAL:
+ stats->rx.errop.partial_pkts++;
+ break;
+ case CQ_RX_ERROP_RE_JABBER:
+ stats->rx.errop.jabber_errs++;
+ break;
+ case CQ_RX_ERROP_RE_FCS:
+ stats->rx.errop.fcs_errs++;
+ break;
+ case CQ_RX_ERROP_RE_TERMINATE:
+ stats->rx.errop.terminate_errs++;
+ break;
+ case CQ_RX_ERROP_RE_RX_CTL:
+ stats->rx.errop.bgx_rx_errs++;
+ break;
+ case CQ_RX_ERROP_PREL2_ERR:
+ stats->rx.errop.prel2_errs++;
+ break;
+ case CQ_RX_ERROP_L2_FRAGMENT:
+ stats->rx.errop.l2_frags++;
+ break;
+ case CQ_RX_ERROP_L2_OVERRUN:
+ stats->rx.errop.l2_overruns++;
+ break;
+ case CQ_RX_ERROP_L2_PFCS:
+ stats->rx.errop.l2_pfcs++;
+ break;
+ case CQ_RX_ERROP_L2_PUNY:
+ stats->rx.errop.l2_puny++;
+ break;
+ case CQ_RX_ERROP_L2_MAL:
+ stats->rx.errop.l2_hdr_malformed++;
+ break;
+ case CQ_RX_ERROP_L2_OVERSIZE:
+ stats->rx.errop.l2_oversize++;
+ break;
+ case CQ_RX_ERROP_L2_UNDERSIZE:
+ stats->rx.errop.l2_undersize++;
+ break;
+ case CQ_RX_ERROP_L2_LENMISM:
+ stats->rx.errop.l2_len_mismatch++;
+ break;
+ case CQ_RX_ERROP_L2_PCLP:
+ stats->rx.errop.l2_pclp++;
+ break;
+ case CQ_RX_ERROP_IP_NOT:
+ stats->rx.errop.non_ip++;
+ break;
+ case CQ_RX_ERROP_IP_CSUM_ERR:
+ stats->rx.errop.ip_csum_err++;
+ break;
+ case CQ_RX_ERROP_IP_MAL:
+ stats->rx.errop.ip_hdr_malformed++;
+ break;
+ case CQ_RX_ERROP_IP_MALD:
+ stats->rx.errop.ip_payload_malformed++;
+ break;
+ case CQ_RX_ERROP_IP_HOP:
+ stats->rx.errop.ip_hop_errs++;
+ break;
+ case CQ_RX_ERROP_L3_ICRC:
+ stats->rx.errop.l3_icrc_errs++;
+ break;
+ case CQ_RX_ERROP_L3_PCLP:
+ stats->rx.errop.l3_pclp++;
+ break;
+ case CQ_RX_ERROP_L4_MAL:
+ stats->rx.errop.l4_malformed++;
+ break;
+ case CQ_RX_ERROP_L4_CHK:
+ stats->rx.errop.l4_csum_errs++;
+ break;
+ case CQ_RX_ERROP_UDP_LEN:
+ stats->rx.errop.udp_len_err++;
+ break;
+ case CQ_RX_ERROP_L4_PORT:
+ stats->rx.errop.bad_l4_port++;
+ break;
+ case CQ_RX_ERROP_TCP_FLAG:
+ stats->rx.errop.bad_tcp_flag++;
+ break;
+ case CQ_RX_ERROP_TCP_OFFSET:
+ stats->rx.errop.tcp_offset_errs++;
+ break;
+ case CQ_RX_ERROP_L4_PCLP:
+ stats->rx.errop.l4_pclp++;
+ break;
+ case CQ_RX_ERROP_RBDR_TRUNC:
+ stats->rx.errop.pkt_truncated++;
+ break;
+ }
+
+ return 1;
+}
+
+/* Check for errors in the send completion queue entry */
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+{
+ struct cmp_queue_stats *stats = &cq->stats;
+
+ switch (cqe_tx->send_status) {
+ case CQ_TX_ERROP_GOOD:
+ stats->tx.good++;
+ return 0;
+ case CQ_TX_ERROP_DESC_FAULT:
+ stats->tx.desc_fault++;
+ break;
+ case CQ_TX_ERROP_HDR_CONS_ERR:
+ stats->tx.hdr_cons_err++;
+ break;
+ case CQ_TX_ERROP_SUBDC_ERR:
+ stats->tx.subdesc_err++;
+ break;
+ case CQ_TX_ERROP_IMM_SIZE_OFLOW:
+ stats->tx.imm_size_oflow++;
+ break;
+ case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
+ stats->tx.data_seq_err++;
+ break;
+ case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
+ stats->tx.mem_seq_err++;
+ break;
+ case CQ_TX_ERROP_LOCK_VIOL:
+ stats->tx.lock_viol++;
+ break;
+ case CQ_TX_ERROP_DATA_FAULT:
+ stats->tx.data_fault++;
+ break;
+ case CQ_TX_ERROP_TSTMP_CONFLICT:
+ stats->tx.tstmp_conflict++;
+ break;
+ case CQ_TX_ERROP_TSTMP_TIMEOUT:
+ stats->tx.tstmp_timeout++;
+ break;
+ case CQ_TX_ERROP_MEM_FAULT:
+ stats->tx.mem_fault++;
+ break;
+ case CQ_TX_ERROP_CK_OVERLAP:
+ stats->tx.csum_overlap++;
+ break;
+ case CQ_TX_ERROP_CK_OFLOW:
+ stats->tx.csum_overflow++;
+ break;
+ }
+
+ return 1;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
new file mode 100644
index 000000000000..8341bdf755d1
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NICVF_QUEUES_H
+#define NICVF_QUEUES_H
+
+#include <linux/netdevice.h>
+#include "q_struct.h"
+
+#define MAX_QUEUE_SET 128
+#define MAX_RCV_QUEUES_PER_QS 8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
+#define MAX_SND_QUEUES_PER_QS 8
+#define MAX_CMP_QUEUES_PER_QS 8
+
+/* VF's queue interrupt ranges */
+#define NICVF_INTR_ID_CQ 0
+#define NICVF_INTR_ID_SQ 8
+#define NICVF_INTR_ID_RBDR 16
+#define NICVF_INTR_ID_MISC 18
+#define NICVF_INTR_ID_QS_ERR 19
+
+#define for_each_cq_irq(irq) \
+ for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
+#define for_each_sq_irq(irq) \
+ for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
+#define for_each_rbdr_irq(irq) \
+ for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
+
+#define RBDR_SIZE0 0ULL /* 8K entries */
+#define RBDR_SIZE1 1ULL /* 16K entries */
+#define RBDR_SIZE2 2ULL /* 32K entries */
+#define RBDR_SIZE3 3ULL /* 64K entries */
+#define RBDR_SIZE4 4ULL /* 128K entries */
+#define RBDR_SIZE5 5ULL /* 256K entries */
+#define RBDR_SIZE6 6ULL /* 512K entries */
+
+#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
+#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
+#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
+#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
+#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
+#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
+#define SND_QUEUE_SIZE6 6ULL /* 64K entries */
+
+#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
+#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
+#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
+#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
+#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
+#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
+#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
+
+/* Default queue counts per QS, queue lengths and threshold values */
+#define RBDR_CNT 1
+#define RCV_QUEUE_CNT 8
+#define SND_QUEUE_CNT 8
+#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
+
+#define SND_QSIZE SND_QUEUE_SIZE4
+#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
+#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
+#define SND_QUEUE_THRESH 2ULL
+#define MIN_SQ_DESC_PER_PKT_XMIT 2
+/* 1 since timestamping is not enabled; would be 2 otherwise */
+#define MAX_CQE_PER_PKT_XMIT 1
+
+#define CMP_QSIZE CMP_QUEUE_SIZE4
+#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
+#define CMP_QUEUE_CQE_THRESH 0
+#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
+
+#define RBDR_SIZE RBDR_SIZE0
+#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
+#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
+#define RBDR_THRESH (RCV_BUF_COUNT / 2)
+#define DMA_BUFFER_LEN 2048 /* Must be a multiple of 128 bytes */
+#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
+ (NICVF_RCV_BUF_ALIGN_BYTES * 2))
+#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES
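+/* Rough size of RCV_FRAG_LEN (illustrative only; NET_SKB_PAD and the
+ * skb_shared_info size depend on arch and config): assuming
+ * NET_SKB_PAD = 64 and a ~320-byte skb_shared_info, this works out to
+ * about 2112 + 320 + 256 ~= 2.6-2.8 KB per receive buffer, depending
+ * on the cache line size used by SKB_DATA_ALIGN().
+ */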
+
+#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
+ MAX_CQE_PER_PKT_XMIT)
+#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
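+/* With the defaults above: SND_QUEUE_LEN = 1 << (4 + 10) = 16384, so
+ * MAX_CQES_FOR_TX = (16384 / 2) * 1 = 8192, CMP_QUEUE_LEN = 16384 and
+ * RQ_CQ_DROP = (16384 - 8192) / 256 = 32.
+ */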
+
+/* Descriptor size in bytes */
+#define SND_QUEUE_DESC_SIZE 16
+#define CMP_QUEUE_DESC_SIZE 512
+
+/* Buffer / descriptor alignments */
+#define NICVF_RCV_BUF_ALIGN 7
+#define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN)
+#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */
+
+#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)
+#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
+ (NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
+#define NICVF_RCV_BUF_ALIGN_LEN(X)\
+ (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)
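+/* Example: NICVF_RCV_BUF_ALIGN_LEN(0x1050) = ALIGN(0x1050, 128) - 0x1050
+ * = 0x1080 - 0x1050 = 0x30, i.e. the padding needed to reach the next
+ * 128-byte boundary (zero when the address is already aligned).
+ */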
+
+/* Queue enable/disable */
+#define NICVF_SQ_EN BIT_ULL(19)
+
+/* Queue reset */
+#define NICVF_CQ_RESET BIT_ULL(41)
+#define NICVF_SQ_RESET BIT_ULL(17)
+#define NICVF_RBDR_RESET BIT_ULL(43)
+
+enum CQ_RX_ERRLVL_E {
+ CQ_ERRLVL_MAC,
+ CQ_ERRLVL_L2,
+ CQ_ERRLVL_L3,
+ CQ_ERRLVL_L4,
+};
+
+enum CQ_RX_ERROP_E {
+ CQ_RX_ERROP_RE_NONE = 0x0,
+ CQ_RX_ERROP_RE_PARTIAL = 0x1,
+ CQ_RX_ERROP_RE_JABBER = 0x2,
+ CQ_RX_ERROP_RE_FCS = 0x7,
+ CQ_RX_ERROP_RE_TERMINATE = 0x9,
+ CQ_RX_ERROP_RE_RX_CTL = 0xb,
+ CQ_RX_ERROP_PREL2_ERR = 0x1f,
+ CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+ CQ_RX_ERROP_L2_OVERRUN = 0x21,
+ CQ_RX_ERROP_L2_PFCS = 0x22,
+ CQ_RX_ERROP_L2_PUNY = 0x23,
+ CQ_RX_ERROP_L2_MAL = 0x24,
+ CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+ CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+ CQ_RX_ERROP_L2_LENMISM = 0x27,
+ CQ_RX_ERROP_L2_PCLP = 0x28,
+ CQ_RX_ERROP_IP_NOT = 0x41,
+ CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+ CQ_RX_ERROP_IP_MAL = 0x43,
+ CQ_RX_ERROP_IP_MALD = 0x44,
+ CQ_RX_ERROP_IP_HOP = 0x45,
+ CQ_RX_ERROP_L3_ICRC = 0x46,
+ CQ_RX_ERROP_L3_PCLP = 0x47,
+ CQ_RX_ERROP_L4_MAL = 0x61,
+ CQ_RX_ERROP_L4_CHK = 0x62,
+ CQ_RX_ERROP_UDP_LEN = 0x63,
+ CQ_RX_ERROP_L4_PORT = 0x64,
+ CQ_RX_ERROP_TCP_FLAG = 0x65,
+ CQ_RX_ERROP_TCP_OFFSET = 0x66,
+ CQ_RX_ERROP_L4_PCLP = 0x67,
+ CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+enum CQ_TX_ERROP_E {
+ CQ_TX_ERROP_GOOD = 0x0,
+ CQ_TX_ERROP_DESC_FAULT = 0x10,
+ CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+ CQ_TX_ERROP_SUBDC_ERR = 0x12,
+ CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+ CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+ CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+ CQ_TX_ERROP_LOCK_VIOL = 0x83,
+ CQ_TX_ERROP_DATA_FAULT = 0x84,
+ CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+ CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+ CQ_TX_ERROP_MEM_FAULT = 0x87,
+ CQ_TX_ERROP_CK_OVERLAP = 0x88,
+ CQ_TX_ERROP_CK_OFLOW = 0x89,
+ CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
+
+struct cmp_queue_stats {
+ struct rx_stats {
+ struct {
+ u64 mac_errs;
+ u64 l2_errs;
+ u64 l3_errs;
+ u64 l4_errs;
+ } errlvl;
+ struct {
+ u64 good;
+ u64 partial_pkts;
+ u64 jabber_errs;
+ u64 fcs_errs;
+ u64 terminate_errs;
+ u64 bgx_rx_errs;
+ u64 prel2_errs;
+ u64 l2_frags;
+ u64 l2_overruns;
+ u64 l2_pfcs;
+ u64 l2_puny;
+ u64 l2_hdr_malformed;
+ u64 l2_oversize;
+ u64 l2_undersize;
+ u64 l2_len_mismatch;
+ u64 l2_pclp;
+ u64 non_ip;
+ u64 ip_csum_err;
+ u64 ip_hdr_malformed;
+ u64 ip_payload_malformed;
+ u64 ip_hop_errs;
+ u64 l3_icrc_errs;
+ u64 l3_pclp;
+ u64 l4_malformed;
+ u64 l4_csum_errs;
+ u64 udp_len_err;
+ u64 bad_l4_port;
+ u64 bad_tcp_flag;
+ u64 tcp_offset_errs;
+ u64 l4_pclp;
+ u64 pkt_truncated;
+ } errop;
+ } rx;
+ struct tx_stats {
+ u64 good;
+ u64 desc_fault;
+ u64 hdr_cons_err;
+ u64 subdesc_err;
+ u64 imm_size_oflow;
+ u64 data_seq_err;
+ u64 mem_seq_err;
+ u64 lock_viol;
+ u64 data_fault;
+ u64 tstmp_conflict;
+ u64 tstmp_timeout;
+ u64 mem_fault;
+ u64 csum_overlap;
+ u64 csum_overflow;
+ } tx;
+} ____cacheline_aligned_in_smp;
+
+enum RQ_SQ_STATS {
+ RQ_SQ_STATS_OCTS,
+ RQ_SQ_STATS_PKTS,
+};
+
+struct rx_tx_queue_stats {
+ u64 bytes;
+ u64 pkts;
+} ____cacheline_aligned_in_smp;
+
+struct q_desc_mem {
+ dma_addr_t dma;
+ u64 size;
+ u16 q_len;
+ dma_addr_t phys_base;
+ void *base;
+ void *unalign_base;
+};
+
+struct rbdr {
+ bool enable;
+ u32 dma_size;
+ u32 frag_len;
+ u32 thresh; /* Threshold level for interrupt */
+ void *desc;
+ u32 head;
+ u32 tail;
+ struct q_desc_mem dmem;
+} ____cacheline_aligned_in_smp;
+
+struct rcv_queue {
+ bool enable;
+ struct rbdr *rbdr_start;
+ struct rbdr *rbdr_cont;
+ bool en_tcp_reassembly;
+ u8 cq_qs; /* CQ's QS to which this RQ is assigned */
+ u8 cq_idx; /* CQ index (0 to 7) in the QS */
+ u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */
+ u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */
+ u8 start_rbdr_qs; /* First buffer ptrs - QS num */
+ u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
+ u8 caching;
+ struct rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct cmp_queue {
+ bool enable;
+ u16 thresh;
+ spinlock_t lock; /* lock to serialize processing CQEs */
+ void *desc;
+ struct q_desc_mem dmem;
+ struct cmp_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct snd_queue {
+ bool enable;
+ u8 cq_qs; /* CQ's QS to which this SQ is pointing */
+ u8 cq_idx; /* CQ index (0 to 7) in the above QS */
+ u16 thresh;
+ atomic_t free_cnt;
+ u32 head;
+ u32 tail;
+ u64 *skbuff;
+ void *desc;
+
+#define TSO_HEADER_SIZE 128
+ /* For TSO segment's header */
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_phys;
+
+ cpumask_t affinity_mask;
+ struct q_desc_mem dmem;
+ struct rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct queue_set {
+ bool enable;
+ bool be_en;
+ u8 vnic_id;
+ u8 rq_cnt;
+ u8 cq_cnt;
+ u64 cq_len;
+ u8 sq_cnt;
+ u64 sq_len;
+ u8 rbdr_cnt;
+ u64 rbdr_len;
+ struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
+ struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
+ struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
+ struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
+} ____cacheline_aligned_in_smp;
+
+#define GET_RBDR_DESC(RING, idx)\
+ (&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
+#define GET_SQ_DESC(RING, idx)\
+ (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
+#define GET_CQ_DESC(RING, idx)\
+ (&(((union cq_desc_t *)((RING)->desc))[idx]))
+
+/* CQ status bits */
+#define CQ_WR_FULL BIT(26)
+#define CQ_WR_DISABLE BIT(25)
+#define CQ_WR_FAULT BIT(24)
+#define CQ_CQE_COUNT (0xFFFF << 0)
+
+#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+
+int nicvf_set_qset_resources(struct nicvf *nic);
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
+void nicvf_qset_config(struct nicvf *nic, bool enable);
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable);
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
+void nicvf_sq_disable(struct nicvf *nic, int qidx);
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
+void nicvf_sq_free_used_descs(struct net_device *netdev,
+ struct snd_queue *sq, int qidx);
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);
+
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+void nicvf_rbdr_task(unsigned long data);
+void nicvf_rbdr_work(struct work_struct *work);
+
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
+
+/* Register access APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val);
+u64 nicvf_queue_reg_read(struct nicvf *nic,
+ u64 offset, u64 qidx);
+
+/* Stats */
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
new file mode 100644
index 000000000000..3c1de97b1add
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -0,0 +1,701 @@
+/*
+ * This file contains HW queue descriptor formats, config register
+ * structures, etc.
+ *
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef Q_STRUCT_H
+#define Q_STRUCT_H
+
+/* Load transaction types for reading segment bytes specified by
+ * NIC_SEND_GATHER_S[LD_TYPE].
+ */
+enum nic_send_ld_type_e {
+ NIC_SEND_LD_TYPE_E_LDD = 0x0,
+ NIC_SEND_LD_TYPE_E_LDT = 0x1,
+ NIC_SEND_LD_TYPE_E_LDWB = 0x2,
+ NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
+};
+
+enum ether_type_algorithm {
+ ETYPE_ALG_NONE = 0x0,
+ ETYPE_ALG_SKIP = 0x1,
+ ETYPE_ALG_ENDPARSE = 0x2,
+ ETYPE_ALG_VLAN = 0x3,
+ ETYPE_ALG_VLAN_STRIP = 0x4,
+};
+
+enum layer3_type {
+ L3TYPE_NONE = 0x00,
+ L3TYPE_GRH = 0x01,
+ L3TYPE_IPV4 = 0x04,
+ L3TYPE_IPV4_OPTIONS = 0x05,
+ L3TYPE_IPV6 = 0x06,
+ L3TYPE_IPV6_OPTIONS = 0x07,
+ L3TYPE_ET_STOP = 0x0D,
+ L3TYPE_OTHER = 0x0E,
+};
+
+enum layer4_type {
+ L4TYPE_NONE = 0x00,
+ L4TYPE_IPSEC_ESP = 0x01,
+ L4TYPE_IPFRAG = 0x02,
+ L4TYPE_IPCOMP = 0x03,
+ L4TYPE_TCP = 0x04,
+ L4TYPE_UDP = 0x05,
+ L4TYPE_SCTP = 0x06,
+ L4TYPE_GRE = 0x07,
+ L4TYPE_ROCE_BTH = 0x08,
+ L4TYPE_OTHER = 0x0E,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+ CPI_ALG_NONE = 0x0,
+ CPI_ALG_VLAN = 0x1,
+ CPI_ALG_VLAN16 = 0x2,
+ CPI_ALG_DIFF = 0x3,
+};
+
+enum rss_algorithm_type {
+ RSS_ALG_NONE = 0x00,
+ RSS_ALG_PORT = 0x01,
+ RSS_ALG_IP = 0x02,
+ RSS_ALG_TCP_IP = 0x03,
+ RSS_ALG_UDP_IP = 0x04,
+ RSS_ALG_SCTP_IP = 0x05,
+ RSS_ALG_GRE_IP = 0x06,
+ RSS_ALG_ROCE = 0x07,
+};
+
+enum rss_hash_cfg {
+ RSS_HASH_L2ETC = 0x00,
+ RSS_HASH_IP = 0x01,
+ RSS_HASH_TCP = 0x02,
+ RSS_HASH_TCP_SYN_DIS = 0x03,
+ RSS_HASH_UDP = 0x04,
+ RSS_HASH_L4ETC = 0x05,
+ RSS_HASH_ROCE = 0x06,
+ RSS_L3_BIDI = 0x07,
+ RSS_L4_BIDI = 0x08,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+ CQE_TYPE_INVALID = 0x0,
+ CQE_TYPE_RX = 0x2,
+ CQE_TYPE_RX_SPLIT = 0x3,
+ CQE_TYPE_RX_TCP = 0x4,
+ CQE_TYPE_SEND = 0x8,
+ CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+ CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
+ CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+ CQE_SEND_STATUS_GOOD = 0x00,
+ CQE_SEND_STATUS_DESC_FAULT = 0x01,
+ CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+ CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+ CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+ CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+ CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+ CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+ CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+ CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+ CQE_SEND_STATUS_DATA_FAULT = 0x86,
+ CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+ CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+ CQE_SEND_STATUS_MEM_FAULT = 0x89,
+ CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+ CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+ CQE_RX_TCP_END_FIN_FLAG_DET = 0,
+ CQE_RX_TCP_END_INVALID_FLAG = 1,
+ CQE_RX_TCP_END_TIMEOUT = 2,
+ CQE_RX_TCP_END_OUT_OF_SEQ = 3,
+ CQE_RX_TCP_END_PKT_ERR = 4,
+ CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+ CQE_RX_ERRLVL_RE = 0x0,
+ CQE_RX_ERRLVL_L2 = 0x1,
+ CQE_RX_ERRLVL_L3 = 0x2,
+ CQE_RX_ERRLVL_L4 = 0x3,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+ CQE_RX_ERR_RE_NONE = 0x0,
+ CQE_RX_ERR_RE_PARTIAL = 0x1,
+ CQE_RX_ERR_RE_JABBER = 0x2,
+ CQE_RX_ERR_RE_FCS = 0x7,
+ CQE_RX_ERR_RE_TERMINATE = 0x9,
+ CQE_RX_ERR_RE_RX_CTL = 0xb,
+ CQE_RX_ERR_PREL2_ERR = 0x1f,
+ CQE_RX_ERR_L2_FRAGMENT = 0x20,
+ CQE_RX_ERR_L2_OVERRUN = 0x21,
+ CQE_RX_ERR_L2_PFCS = 0x22,
+ CQE_RX_ERR_L2_PUNY = 0x23,
+ CQE_RX_ERR_L2_MAL = 0x24,
+ CQE_RX_ERR_L2_OVERSIZE = 0x25,
+ CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+ CQE_RX_ERR_L2_LENMISM = 0x27,
+ CQE_RX_ERR_L2_PCLP = 0x28,
+ CQE_RX_ERR_IP_NOT = 0x41,
+ CQE_RX_ERR_IP_CHK = 0x42,
+ CQE_RX_ERR_IP_MAL = 0x43,
+ CQE_RX_ERR_IP_MALD = 0x44,
+ CQE_RX_ERR_IP_HOP = 0x45,
+ CQE_RX_ERR_L3_ICRC = 0x46,
+ CQE_RX_ERR_L3_PCLP = 0x47,
+ CQE_RX_ERR_L4_MAL = 0x61,
+ CQE_RX_ERR_L4_CHK = 0x62,
+ CQE_RX_ERR_UDP_LEN = 0x63,
+ CQE_RX_ERR_L4_PORT = 0x64,
+ CQE_RX_ERR_TCP_FLAG = 0x65,
+ CQE_RX_ERR_TCP_OFFSET = 0x66,
+ CQE_RX_ERR_L4_PCLP = 0x67,
+ CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+struct cqe_rx_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 stdn_fault:1;
+ u64 rsvd0:1;
+ u64 rq_qs:7;
+ u64 rq_idx:3;
+ u64 rsvd1:12;
+ u64 rss_alg:4;
+ u64 rsvd2:4;
+ u64 rb_cnt:4;
+ u64 vlan_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan2_stripped:1;
+ u64 l4_type:4;
+ u64 l3_type:4;
+ u64 l2_present:1;
+ u64 err_level:3;
+ u64 err_opcode:8;
+
+ u64 pkt_len:16; /* W1 */
+ u64 l2_ptr:8;
+ u64 l3_ptr:8;
+ u64 l4_ptr:8;
+ u64 cq_pkt_len:8;
+ u64 align_pad:3;
+ u64 rsvd3:1;
+ u64 chan:12;
+
+ u64 rss_tag:32; /* W2 */
+ u64 vlan_tci:16;
+ u64 vlan_ptr:8;
+ u64 vlan2_ptr:8;
+
+ u64 rb3_sz:16; /* W3 */
+ u64 rb2_sz:16;
+ u64 rb1_sz:16;
+ u64 rb0_sz:16;
+
+ u64 rb7_sz:16; /* W4 */
+ u64 rb6_sz:16;
+ u64 rb5_sz:16;
+ u64 rb4_sz:16;
+
+ u64 rb11_sz:16; /* W5 */
+ u64 rb10_sz:16;
+ u64 rb9_sz:16;
+ u64 rb8_sz:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 err_opcode:8;
+ u64 err_level:3;
+ u64 l2_present:1;
+ u64 l3_type:4;
+ u64 l4_type:4;
+ u64 vlan2_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan_found:1;
+ u64 rb_cnt:4;
+ u64 rsvd2:4;
+ u64 rss_alg:4;
+ u64 rsvd1:12;
+ u64 rq_idx:3;
+ u64 rq_qs:7;
+ u64 rsvd0:1;
+ u64 stdn_fault:1;
+ u64 cqe_type:4; /* W0 */
+ u64 chan:12;
+ u64 rsvd3:1;
+ u64 align_pad:3;
+ u64 cq_pkt_len:8;
+ u64 l4_ptr:8;
+ u64 l3_ptr:8;
+ u64 l2_ptr:8;
+ u64 pkt_len:16; /* W1 */
+ u64 vlan2_ptr:8;
+ u64 vlan_ptr:8;
+ u64 vlan_tci:16;
+ u64 rss_tag:32; /* W2 */
+ u64 rb0_sz:16;
+ u64 rb1_sz:16;
+ u64 rb2_sz:16;
+ u64 rb3_sz:16; /* W3 */
+ u64 rb4_sz:16;
+ u64 rb5_sz:16;
+ u64 rb6_sz:16;
+ u64 rb7_sz:16; /* W4 */
+ u64 rb8_sz:16;
+ u64 rb9_sz:16;
+ u64 rb10_sz:16;
+ u64 rb11_sz:16; /* W5 */
+#endif
+ u64 rb0_ptr:64;
+ u64 rb1_ptr:64;
+ u64 rb2_ptr:64;
+ u64 rb3_ptr:64;
+ u64 rb4_ptr:64;
+ u64 rb5_ptr:64;
+ u64 rb6_ptr:64;
+ u64 rb7_ptr:64;
+ u64 rb8_ptr:64;
+ u64 rb9_ptr:64;
+ u64 rb10_ptr:64;
+ u64 rb11_ptr:64;
+};
+
+struct cqe_rx_tcp_err_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:60;
+
+ u64 rsvd1:4; /* W1 */
+ u64 partial_first:1;
+ u64 rsvd2:27;
+ u64 rbdr_bytes:8;
+ u64 rsvd3:24;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 rsvd0:60;
+ u64 cqe_type:4;
+
+ u64 rsvd3:24;
+ u64 rbdr_bytes:8;
+ u64 rsvd2:27;
+ u64 partial_first:1;
+ u64 rsvd1:4;
+#endif
+};
+
+struct cqe_rx_tcp_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:52;
+ u64 cq_tcp_status:8;
+
+ u64 rsvd1:32; /* W1 */
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd2:8;
+ u64 tcp_err_bytes:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 cq_tcp_status:8;
+ u64 rsvd0:52;
+ u64 cqe_type:4; /* W0 */
+
+ u64 tcp_err_bytes:16;
+ u64 rsvd2:8;
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd1:32; /* W1 */
+#endif
+};
+
+struct cqe_send_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:4;
+ u64 sqe_ptr:16;
+ u64 rsvd1:4;
+ u64 rsvd2:10;
+ u64 sq_qs:7;
+ u64 sq_idx:3;
+ u64 rsvd3:8;
+ u64 send_status:8;
+
+ u64 ptp_timestamp:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 send_status:8;
+ u64 rsvd3:8;
+ u64 sq_idx:3;
+ u64 sq_qs:7;
+ u64 rsvd2:10;
+ u64 rsvd1:4;
+ u64 sqe_ptr:16;
+ u64 rsvd0:4;
+ u64 cqe_type:4; /* W0 */
+
+ u64 ptp_timestamp:64; /* W1 */
+#endif
+};
+
+union cq_desc_t {
+ u64 u[64];
+ struct cqe_send_t snd_hdr;
+ struct cqe_rx_t rx_hdr;
+ struct cqe_rx_tcp_t rx_tcp_hdr;
+ struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+};
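+
+/* Illustrative sketch, not part of the original patch: every CQE
+ * variant carries cqe_type in the same bit positions, so a completion
+ * handler can dispatch on any member's cqe_type.  CQE_TYPE_SEND is
+ * assumed to be among the cqe_types enumerators defined earlier in
+ * this header.
+ */
+static inline bool cq_desc_is_send(const union cq_desc_t *desc)
+{
+	return desc->snd_hdr.cqe_type == CQE_TYPE_SEND;
+}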
+
+struct rbdr_entry_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd0:15;
+ u64 buf_addr:42;
+ u64 cache_align:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 cache_align:7;
+ u64 buf_addr:42;
+ u64 rsvd0:15;
+#endif
+};
+
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 tcp_pkt_cnt:12;
+ u64 rsvd1:4;
+ u64 align_hdr_bytes:4;
+ u64 align_ptr_bytes:4;
+ u64 ptr_bytes:16;
+ u64 rsvd2:24;
+ u64 cqe_type:4;
+ u64 rsvd0:54;
+ u64 tcp_end_reason:2;
+ u64 tcp_status:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_status:4;
+ u64 tcp_end_reason:2;
+ u64 rsvd0:54;
+ u64 cqe_type:4;
+ u64 rsvd2:24;
+ u64 ptr_bytes:16;
+ u64 align_ptr_bytes:4;
+ u64 align_hdr_bytes:4;
+ u64 rsvd1:4;
+ u64 tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always big endian */
+struct rx_hdr_t {
+ u64 opaque:32;
+ u64 rss_flow:8;
+ u64 skip_length:6;
+ u64 disable_rss:1;
+ u64 disable_tcp_reassembly:1;
+ u64 nodrop:1;
+ u64 dest_alg:2;
+ u64 rsvd0:2;
+ u64 dest_rq:11;
+};
+
+enum send_l4_csum_type {
+ SEND_L4_CSUM_DISABLE = 0x00,
+ SEND_L4_CSUM_UDP = 0x01,
+ SEND_L4_CSUM_TCP = 0x02,
+ SEND_L4_CSUM_SCTP = 0x03,
+};
+
+enum send_crc_alg {
+ SEND_CRCALG_CRC32 = 0x00,
+ SEND_CRCALG_CRC32C = 0x01,
+ SEND_CRCALG_ICRC = 0x02,
+};
+
+enum send_load_type {
+ SEND_LD_TYPE_LDD = 0x00,
+ SEND_LD_TYPE_LDT = 0x01,
+ SEND_LD_TYPE_LDWB = 0x02,
+};
+
+enum send_mem_alg_type {
+ SEND_MEMALG_SET = 0x00,
+ SEND_MEMALG_ADD = 0x08,
+ SEND_MEMALG_SUB = 0x09,
+ SEND_MEMALG_ADDLEN = 0x0A,
+ SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+ SEND_MEMDSZ_B64 = 0x00,
+ SEND_MEMDSZ_B32 = 0x01,
+ SEND_MEMDSZ_B8 = 0x03,
+};
+
+enum sq_subdesc_type {
+ SQ_DESC_TYPE_INVALID = 0x00,
+ SQ_DESC_TYPE_HEADER = 0x01,
+ SQ_DESC_TYPE_CRC = 0x02,
+ SQ_DESC_TYPE_IMMEDIATE = 0x03,
+ SQ_DESC_TYPE_GATHER = 0x04,
+ SQ_DESC_TYPE_MEMORY = 0x05,
+};
+
+struct sq_crc_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd1:32;
+ u64 crc_ival:32;
+ u64 subdesc_type:4;
+ u64 crc_alg:2;
+ u64 rsvd0:10;
+ u64 crc_insert_pos:16;
+ u64 hdr_start:16;
+ u64 crc_len:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 crc_len:16;
+ u64 hdr_start:16;
+ u64 crc_insert_pos:16;
+ u64 rsvd0:10;
+ u64 crc_alg:2;
+ u64 subdesc_type:4;
+ u64 crc_ival:32;
+ u64 rsvd1:32;
+#endif
+};
+
+struct sq_gather_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 ld_type:2;
+ u64 rsvd0:42;
+ u64 size:16;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 size:16;
+ u64 rsvd0:42;
+ u64 ld_type:2;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 rsvd0:46;
+ u64 len:14;
+
+ u64 data:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 len:14;
+ u64 rsvd0:46;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 data:64; /* W1 */
+#endif
+};
+
+struct sq_mem_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 mem_alg:4;
+ u64 mem_dsz:2;
+ u64 wmem:1;
+ u64 rsvd0:21;
+ u64 offset:32;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 offset:32;
+ u64 rsvd0:21;
+ u64 wmem:1;
+ u64 mem_dsz:2;
+ u64 mem_alg:4;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+struct sq_hdr_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4;
+ u64 tso:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 dont_send:1;
+ u64 tstmp:1;
+ u64 subdesc_cnt:8;
+ u64 csum_l4:2;
+ u64 csum_l3:1;
+ u64 rsvd0:5;
+ u64 l4_offset:8;
+ u64 l3_offset:8;
+ u64 rsvd1:4;
+ u64 tot_len:20; /* W0 */
+
+ u64 tso_sdc_cont:8;
+ u64 tso_sdc_first:8;
+ u64 tso_l4_offset:8;
+ u64 tso_flags_last:12;
+ u64 tso_flags_first:12;
+ u64 rsvd2:2;
+ u64 tso_max_paysize:14; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tot_len:20;
+ u64 rsvd1:4;
+ u64 l3_offset:8;
+ u64 l4_offset:8;
+ u64 rsvd0:5;
+ u64 csum_l3:1;
+ u64 csum_l4:2;
+ u64 subdesc_cnt:8;
+ u64 tstmp:1;
+ u64 dont_send:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 tso:1;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 tso_max_paysize:14;
+ u64 rsvd2:2;
+ u64 tso_flags_first:12;
+ u64 tso_flags_last:12;
+ u64 tso_l4_offset:8;
+ u64 tso_sdc_first:8;
+ u64 tso_sdc_cont:8; /* W1 */
+#endif
+};
+
+/* Queue config register formats */
+struct rq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_2_63:62;
+ u64 ena:1;
+ u64 tcp_ena:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_ena:1;
+ u64 ena:1;
+ u64 reserved_2_63:62;
+#endif
+};
+
+struct cq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_43_63:21;
+ u64 ena:1;
+ u64 reset:1;
+ u64 caching:1;
+ u64 reserved_35_39:5;
+ u64 qsize:3;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_0_15:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_0_15:16;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:3;
+ u64 reserved_35_39:5;
+ u64 caching:1;
+ u64 reset:1;
+ u64 ena:1;
+ u64 reserved_43_63:21;
+#endif
+};
+
+struct sq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_20_63:44;
+ u64 ena:1;
+ u64 reserved_18_18:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_11_15:5;
+ u64 qsize:3;
+ u64 reserved_3_7:5;
+ u64 tstmp_bgx_intf:3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tstmp_bgx_intf:3;
+ u64 reserved_3_7:5;
+ u64 qsize:3;
+ u64 reserved_11_15:5;
+ u64 ldwb:1;
+ u64 reset:1;
+ u64 reserved_18_18:1;
+ u64 ena:1;
+ u64 reserved_20_63:44;
+#endif
+};
+
+struct rbdr_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_45_63:19;
+ u64 ena:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_36_41:6;
+ u64 qsize:4;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_12_15:4;
+ u64 lines:12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 lines:12;
+ u64 reserved_12_15:4;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:4;
+ u64 reserved_36_41:6;
+ u64 ldwb:1;
+ u64 reset:1;
+	u64 ena:1;
+ u64 reserved_45_63:19;
+#endif
+};
+
+struct qs_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_32_63:32;
+ u64 ena:1;
+ u64 reserved_27_30:4;
+ u64 sq_ins_ena:1;
+ u64 sq_ins_pos:6;
+ u64 lock_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 send_tstmp_ena:1;
+ u64 be:1;
+ u64 reserved_7_15:9;
+ u64 vnic:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 vnic:7;
+ u64 reserved_7_15:9;
+ u64 be:1;
+ u64 send_tstmp_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 lock_ena:1;
+ u64 sq_ins_pos:6;
+ u64 sq_ins_ena:1;
+ u64 reserved_27_30:4;
+ u64 ena:1;
+ u64 reserved_32_63:32;
+#endif
+};
+
+#endif /* Q_STRUCT_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
new file mode 100644
index 000000000000..633ec05dfe05
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -0,0 +1,966 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-BGX"
+#define DRV_VERSION "1.0"
+
+struct lmac {
+ struct bgx *bgx;
+ int dmac;
+ unsigned char mac[ETH_ALEN];
+ bool link_up;
+ int lmacid; /* ID within BGX */
+ int lmacid_bd; /* ID on board */
+ struct net_device netdev;
+ struct phy_device *phydev;
+ unsigned int last_duplex;
+ unsigned int last_link;
+ unsigned int last_speed;
+ bool is_sgmii;
+ struct delayed_work dwork;
+ struct workqueue_struct *check_link;
+};
+
+struct bgx {
+ u8 bgx_id;
+ u8 qlm_mode;
+ struct lmac lmac[MAX_LMAC_PER_BGX];
+ int lmac_count;
+ int lmac_type;
+ int lane_to_sds;
+ int use_training;
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+};
+
+static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
+static int lmac_count; /* Total number of LMACs in the system */
+
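+/* Illustrative sketch, not part of the original patch: the per-node
+ * bgx_vnic[] lookup repeated throughout this file could be factored
+ * out as below.
+ */
+static inline struct bgx *get_bgx(int node, int bgx_idx)
+{
+	return bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+}
+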
+static int bgx_xaui_check_link(struct lmac *lmac);
+
+/* Supported devices */
+static const struct pci_device_id bgx_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, bgx_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operations which are redundant in this case and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ return readq_relaxed(addr);
+}
+
+static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ writeq_relaxed(val, addr);
+}
+
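+/* Note: this helper only ORs @val into the register; callers that must
+ * clear bits do an explicit read/mask/write sequence instead.
+ */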
+static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ writeq_relaxed(val | readq_relaxed(addr), addr);
+}
+
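+/* Poll @reg until the @mask bits are cleared (@zero true) or set
+ * (@zero false).  Returns 0 on success, 1 after a ~100-200 ms timeout.
+ */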
+static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
+{
+ int timeout = 100;
+ u64 reg_val;
+
+ while (timeout) {
+ reg_val = bgx_reg_read(bgx, lmac, reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ return 1;
+}
+
+/* Return a bitmap of the BGX devices present in HW */
+unsigned bgx_get_map(int node)
+{
+ int i;
+ unsigned map = 0;
+
+ for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
+ if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
+ map |= (1 << i);
+ }
+
+ return map;
+}
+EXPORT_SYMBOL(bgx_get_map);
+
+/* Return number of LMACs configured for this BGX */
+int bgx_get_lmac_count(int node, int bgx_idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (bgx)
+ return bgx->lmac_count;
+
+ return 0;
+}
+EXPORT_SYMBOL(bgx_get_lmac_count);
+
+/* Returns the current link status of LMAC */
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
+{
+ struct bgx_link_status *link = (struct bgx_link_status *)status;
+ struct bgx *bgx;
+ struct lmac *lmac;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+ link->link_up = lmac->link_up;
+ link->duplex = lmac->last_duplex;
+ link->speed = lmac->last_speed;
+}
+EXPORT_SYMBOL(bgx_get_lmac_link_state);
+
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+ if (bgx)
+ return bgx->lmac[lmacid].mac;
+
+ return NULL;
+}
+EXPORT_SYMBOL(bgx_get_lmac_mac);
+
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+ if (!bgx)
+ return;
+
+ ether_addr_copy(bgx->lmac[lmacid].mac, mac);
+}
+EXPORT_SYMBOL(bgx_set_lmac_mac);
+
+static void bgx_sgmii_change_link_state(struct lmac *lmac)
+{
+ struct bgx *bgx = lmac->bgx;
+ u64 cmr_cfg;
+ u64 port_cfg = 0;
+ u64 misc_ctl = 0;
+
+ cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
+ cmr_cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+
+ port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+ misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
+
+ if (lmac->link_up) {
+ misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
+ port_cfg &= ~GMI_PORT_CFG_DUPLEX;
+ port_cfg |= (lmac->last_duplex << 2);
+ } else {
+ misc_ctl |= PCS_MISC_CTL_GMX_ENO;
+ }
+
+ switch (lmac->last_speed) {
+ case 10:
+ port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+ port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
+ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 50; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 100:
+ port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 5; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 1000:
+ port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
+ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+ port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 1; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
+ if (lmac->last_duplex)
+ bgx_reg_write(bgx, lmac->lmacid,
+ BGX_GMP_GMI_TXX_BURST, 0);
+ else
+ bgx_reg_write(bgx, lmac->lmacid,
+ BGX_GMP_GMI_TXX_BURST, 8192);
+ break;
+ default:
+ break;
+ }
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
+
+ port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+
+	/* Re-enable lmac */
+ cmr_cfg |= CMR_EN;
+ bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+}
+
+static void bgx_lmac_handler(struct net_device *netdev)
+{
+ struct lmac *lmac = container_of(netdev, struct lmac, netdev);
+ struct phy_device *phydev = lmac->phydev;
+ int link_changed = 0;
+
+ if (!phydev->link && lmac->last_link)
+ link_changed = -1;
+
+ if (phydev->link &&
+ (lmac->last_duplex != phydev->duplex ||
+ lmac->last_link != phydev->link ||
+ lmac->last_speed != phydev->speed)) {
+ link_changed = 1;
+ }
+
+ lmac->last_link = phydev->link;
+ lmac->last_speed = phydev->speed;
+ lmac->last_duplex = phydev->duplex;
+
+ if (!link_changed)
+ return;
+
+ if (link_changed > 0)
+ lmac->link_up = true;
+ else
+ lmac->link_up = false;
+
+ if (lmac->is_sgmii)
+ bgx_sgmii_change_link_state(lmac);
+ else
+ bgx_xaui_check_link(lmac);
+}
+
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return 0;
+
+ if (idx > 8)
+ lmac = 0;
+ return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_rx_stats);
+
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return 0;
+
+ return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_tx_stats);
+
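+/* Clear all DMAC CAM entries owned by an LMAC; the CAM holds
+ * MAX_DMAC_PER_LMAC 8-byte entries per LMAC, laid out contiguously.
+ */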
+static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
+{
+ u64 offset;
+
+ while (bgx->lmac[lmac].dmac > 0) {
+ offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
+ (lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
+ bgx->lmac[lmac].dmac--;
+ }
+}
+
+static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
+{
+ u64 cfg;
+
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
+
+ /* Disable frame alignment if using preamble */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ if (cfg & 1)
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ /* PCS reset */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
+ PCS_MRX_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
+ return -1;
+ }
+
+	/* Take PCS out of power down, then restart and enable autoneg */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+ cfg &= ~PCS_MRX_CTL_PWR_DN;
+ cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
+{
+ u64 cfg;
+
+ /* Reset SPU */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+	/* RXAUI additionally needs interleaved running disparity */
+ if (bgx->lmac_type != BGX_MODE_RXAUI)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+ else
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
+ SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
+
+ /* clear all interrupts */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+
+ if (bgx->use_training) {
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
+ /* training enable */
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
+ }
+
+ /* Append FCS to each packet */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
+
+ /* Disable forward error correction */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
+ cfg &= ~SPU_FEC_CTL_FEC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
+
+ /* Disable autoneg */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
+ cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
+ if (bgx->lmac_type == BGX_MODE_10G_KR)
+ cfg |= (1 << 23);
+ else if (bgx->lmac_type == BGX_MODE_40G_KR)
+ cfg |= (1 << 24);
+ else
+ cfg &= ~((1 << 23) | (1 << 24));
+ cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
+
+ cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
+ cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
+ bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
+ cfg &= ~SPU_CTL_LOW_POWER;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
+ cfg &= ~SMU_TX_CTL_UNI_EN;
+ cfg |= SMU_TX_CTL_DIC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
+
+ /* take lmac_count into account */
+	/* TX threshold; should take lmac_count into account */
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
+
+ return 0;
+}
+
+static int bgx_xaui_check_link(struct lmac *lmac)
+{
+ struct bgx *bgx = lmac->bgx;
+ int lmacid = lmac->lmacid;
+ int lmac_type = bgx->lmac_type;
+ u64 cfg;
+
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+ if (bgx->use_training) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+
+	/* Wait for SPU to come out of reset */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
+ (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
+ SPU_BR_STATUS_BLK_LOCK, false)) {
+ dev_err(&bgx->pdev->dev,
+ "SPU_BR_STATUS_BLK_LOCK not completed\n");
+ return -1;
+ }
+ } else {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
+ SPU_BX_STATUS_RX_ALIGN, false)) {
+ dev_err(&bgx->pdev->dev,
+ "SPU_BX_STATUS_RX_ALIGN not completed\n");
+ return -1;
+ }
+ }
+
+ /* Clear rcvflt bit (latching high) and read it back */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
+ if (bgx->use_training) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+ return -1;
+ }
+
+ /* Wait for MAC RX to be ready */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
+ SMU_RX_CTL_STATUS, true)) {
+ dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
+ return -1;
+ }
+
+ /* Wait for BGX RX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
+ return -1;
+ }
+
+ /* Wait for BGX TX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
+ return -1;
+ }
+
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ dev_err(&bgx->pdev->dev, "Receive fault\n");
+ return -1;
+ }
+
+ /* Receive link is latching low. Force it high and verify it */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK, false)) {
+ dev_err(&bgx->pdev->dev, "SPU receive link down\n");
+ return -1;
+ }
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+ cfg &= ~SPU_MISC_CTL_RX_DIS;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
+ return 0;
+}
+
+static void bgx_poll_for_link(struct work_struct *work)
+{
+ struct lmac *lmac;
+ u64 link;
+
+ lmac = container_of(work, struct lmac, dwork.work);
+
+ /* Receive link is latching low. Force it high and verify it */
+ bgx_reg_modify(lmac->bgx, lmac->lmacid,
+ BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+ bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK, false);
+
+ link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ if (link & SPU_STATUS1_RCV_LNK) {
+ lmac->link_up = 1;
+ if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
+ lmac->last_speed = 40000;
+ else
+ lmac->last_speed = 10000;
+ lmac->last_duplex = 1;
+ } else {
+ lmac->link_up = 0;
+ }
+
+ if (lmac->last_link != lmac->link_up) {
+ lmac->last_link = lmac->link_up;
+ if (lmac->link_up)
+ bgx_xaui_check_link(lmac);
+ }
+
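+	/* Re-arm: check the link state again in two seconds */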
+ queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
+}
+
+static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
+{
+ struct lmac *lmac;
+ u64 cfg;
+
+ lmac = &bgx->lmac[lmacid];
+ lmac->bgx = bgx;
+
+ if (bgx->lmac_type == BGX_MODE_SGMII) {
+ lmac->is_sgmii = 1;
+ if (bgx_lmac_sgmii_init(bgx, lmacid))
+ return -1;
+ } else {
+ lmac->is_sgmii = 0;
+ if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
+ return -1;
+ }
+
+ if (lmac->is_sgmii) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
+ } else {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
+ }
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
+ CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+
+	/* Restore default cfg, in case low-level firmware changed it */
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
+
+ if ((bgx->lmac_type != BGX_MODE_XFI) &&
+ (bgx->lmac_type != BGX_MODE_XLAUI) &&
+ (bgx->lmac_type != BGX_MODE_40G_KR) &&
+ (bgx->lmac_type != BGX_MODE_10G_KR)) {
+ if (!lmac->phydev)
+ return -ENODEV;
+
+ lmac->phydev->dev_flags = 0;
+
+ if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+ bgx_lmac_handler,
+ PHY_INTERFACE_MODE_SGMII))
+ return -ENODEV;
+
+ phy_start_aneg(lmac->phydev);
+ } else {
+ lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!lmac->check_link)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+ queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+ }
+
+ return 0;
+}
+
+static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
+{
+ struct lmac *lmac;
+ u64 cmrx_cfg;
+
+ lmac = &bgx->lmac[lmacid];
+ if (lmac->check_link) {
+ /* Destroy work queue */
+ cancel_delayed_work(&lmac->dwork);
+ flush_workqueue(lmac->check_link);
+ destroy_workqueue(lmac->check_link);
+ }
+
+ cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	cmrx_cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ bgx_flush_dmac_addrs(bgx, lmacid);
+
+ if (lmac->phydev)
+ phy_disconnect(lmac->phydev);
+
+ lmac->phydev = NULL;
+}
+
+static void bgx_set_num_ports(struct bgx *bgx)
+{
+ u64 lmac_count;
+
+ switch (bgx->qlm_mode) {
+ case QLM_MODE_SGMII:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_SGMII;
+ bgx->lane_to_sds = 0;
+ break;
+ case QLM_MODE_XAUI_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_XAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_RXAUI_2X2:
+ bgx->lmac_count = 2;
+ bgx->lmac_type = BGX_MODE_RXAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_XFI_4X1:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_XFI;
+ bgx->lane_to_sds = 0;
+ break;
+ case QLM_MODE_XLAUI_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_XLAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_10G_KR_4X1:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_10G_KR;
+ bgx->lane_to_sds = 0;
+ bgx->use_training = 1;
+ break;
+ case QLM_MODE_40G_KR4_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_40G_KR;
+ bgx->lane_to_sds = 0xE4;
+ bgx->use_training = 1;
+ break;
+ default:
+ bgx->lmac_count = 0;
+ break;
+ }
+
+	/* Check if low-level firmware has programmed the LMAC count
+	 * based on board type; if so use it, otherwise keep the
+	 * static defaults set above.
+	 */
+ lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
+ if (lmac_count != 4)
+ bgx->lmac_count = lmac_count;
+}
+
+static void bgx_init_hw(struct bgx *bgx)
+{
+ int i;
+
+ bgx_set_num_ports(bgx);
+
+ bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
+ if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
+ dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);
+
+ /* Set lmac type and lane2serdes mapping */
+ for (i = 0; i < bgx->lmac_count; i++) {
+ if (bgx->lmac_type == BGX_MODE_RXAUI) {
+ if (i)
+ bgx->lane_to_sds = 0x0e;
+ else
+ bgx->lane_to_sds = 0x04;
+ bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+ (bgx->lmac_type << 8) | bgx->lane_to_sds);
+ continue;
+ }
+ bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+ (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
+ bgx->lmac[i].lmacid_bd = lmac_count;
+ lmac_count++;
+ }
+
+ bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
+
+ /* Set the backpressure AND mask */
+ for (i = 0; i < bgx->lmac_count; i++)
+ bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
+ ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
+ (i * MAX_BGX_CHANS_PER_LMAC));
+
+ /* Disable all MAC filtering */
+ for (i = 0; i < RX_DMAC_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
+
+ /* Disable MAC steering (NCSI traffic) */
+ for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+}
+
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+ struct device *dev = &bgx->pdev->dev;
+ int lmac_type;
+ int train_en;
+
+	/* Read LMAC0 type to figure out QLM mode;
+	 * this is configured by low-level firmware.
+	 */
+ lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
+ lmac_type = (lmac_type >> 8) & 0x07;
+
+ train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
+ SPU_PMD_CRTL_TRAIN_EN;
+
+ switch (lmac_type) {
+ case BGX_MODE_SGMII:
+ bgx->qlm_mode = QLM_MODE_SGMII;
+ dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_XAUI:
+ bgx->qlm_mode = QLM_MODE_XAUI_1X4;
+ dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_RXAUI:
+ bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
+ dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_XFI:
+ if (!train_en) {
+ bgx->qlm_mode = QLM_MODE_XFI_4X1;
+ dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
+ } else {
+ bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
+ dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
+ }
+ break;
+ case BGX_MODE_XLAUI:
+ if (!train_en) {
+ bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
+ dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
+ } else {
+ bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
+ dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
+ }
+ break;
+ default:
+ bgx->qlm_mode = QLM_MODE_SGMII;
+ dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
+ }
+}
+
+static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+{
+ struct device_node *np_child;
+ u8 lmac = 0;
+
+ for_each_child_of_node(np, np_child) {
+ struct device_node *phy_np;
+ const char *mac;
+
+ phy_np = of_parse_phandle(np_child, "phy-handle", 0);
+ if (phy_np)
+ bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+
+ mac = of_get_mac_address(np_child);
+ if (mac)
+ ether_addr_copy(bgx->lmac[lmac].mac, mac);
+
+ SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+ bgx->lmac[lmac].lmacid = lmac;
+ lmac++;
+ if (lmac == MAX_LMAC_PER_BGX)
+ break;
+ }
+}
+
+static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ struct bgx *bgx = NULL;
+ struct device_node *np;
+ char bgx_sel[5];
+ u8 lmac;
+
+ bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
+ if (!bgx)
+ return -ENOMEM;
+ bgx->pdev = pdev;
+
+ pci_set_drvdata(pdev, bgx);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!bgx->reg_base) {
+ dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+ bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+ bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;
+
+ bgx_vnic[bgx->bgx_id] = bgx;
+ bgx_get_qlm_mode(bgx);
+
+ snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
+ np = of_find_node_by_name(NULL, bgx_sel);
+ if (np)
+ bgx_init_of(bgx, np);
+
+ bgx_init_hw(bgx);
+
+ /* Enable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ err = bgx_lmac_enable(bgx, lmac);
+ if (err) {
+ dev_err(dev, "BGX%d failed to enable lmac%d\n",
+ bgx->bgx_id, lmac);
+ goto err_enable;
+ }
+ }
+
+ return 0;
+
+err_enable:
+ bgx_vnic[bgx->bgx_id] = NULL;
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void bgx_remove(struct pci_dev *pdev)
+{
+ struct bgx *bgx = pci_get_drvdata(pdev);
+ u8 lmac;
+
+ /* Disable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ bgx_lmac_disable(bgx, lmac);
+
+ bgx_vnic[bgx->bgx_id] = NULL;
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver bgx_driver = {
+ .name = DRV_NAME,
+ .id_table = bgx_id_table,
+ .probe = bgx_probe,
+ .remove = bgx_remove,
+};
+
+static int __init bgx_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&bgx_driver);
+}
+
+static void __exit bgx_cleanup_module(void)
+{
+ pci_unregister_driver(&bgx_driver);
+}
+
+module_init(bgx_init_module);
+module_exit(bgx_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
new file mode 100644
index 000000000000..ba4f53b7cc2c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef THUNDER_BGX_H
+#define THUNDER_BGX_H
+
+#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */
+#define MAX_BGX_PER_CN88XX 2
+#define MAX_LMAC_PER_BGX 4
+#define MAX_BGX_CHANS_PER_LMAC 16
+#define MAX_DMAC_PER_LMAC 8
+#define MAX_FRAME_SIZE 9216
+
+#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
+
+#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
+
+/* Registers */
+#define BGX_CMRX_CFG 0x00
+#define CMR_PKT_TX_EN BIT_ULL(13)
+#define CMR_PKT_RX_EN BIT_ULL(14)
+#define CMR_EN BIT_ULL(15)
+#define BGX_CMR_GLOBAL_CFG 0x08
+#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6)
+#define BGX_CMRX_RX_ID_MAP 0x60
+#define BGX_CMRX_RX_STAT0 0x70
+#define BGX_CMRX_RX_STAT1 0x78
+#define BGX_CMRX_RX_STAT2 0x80
+#define BGX_CMRX_RX_STAT3 0x88
+#define BGX_CMRX_RX_STAT4 0x90
+#define BGX_CMRX_RX_STAT5 0x98
+#define BGX_CMRX_RX_STAT6 0xA0
+#define BGX_CMRX_RX_STAT7 0xA8
+#define BGX_CMRX_RX_STAT8 0xB0
+#define BGX_CMRX_RX_STAT9 0xB8
+#define BGX_CMRX_RX_STAT10 0xC0
+#define BGX_CMRX_RX_BP_DROP 0xC8
+#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMR_RX_DMACX_CAM 0x200
+#define RX_DMACX_CAM_EN BIT_ULL(48)
+#define RX_DMACX_CAM_LMACID(x) (x << 49)
+#define RX_DMAC_COUNT 32
+#define BGX_CMR_RX_STREERING 0x300
+#define RX_TRAFFIC_STEER_RULE_COUNT 8
+#define BGX_CMR_CHAN_MSK_AND 0x450
+#define BGX_CMR_BIST_STATUS 0x460
+#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_STAT0 0x600
+#define BGX_CMRX_TX_STAT1 0x608
+#define BGX_CMRX_TX_STAT2 0x610
+#define BGX_CMRX_TX_STAT3 0x618
+#define BGX_CMRX_TX_STAT4 0x620
+#define BGX_CMRX_TX_STAT5 0x628
+#define BGX_CMRX_TX_STAT6 0x630
+#define BGX_CMRX_TX_STAT7 0x638
+#define BGX_CMRX_TX_STAT8 0x640
+#define BGX_CMRX_TX_STAT9 0x648
+#define BGX_CMRX_TX_STAT10 0x650
+#define BGX_CMRX_TX_STAT11 0x658
+#define BGX_CMRX_TX_STAT12 0x660
+#define BGX_CMRX_TX_STAT13 0x668
+#define BGX_CMRX_TX_STAT14 0x670
+#define BGX_CMRX_TX_STAT15 0x678
+#define BGX_CMRX_TX_STAT16 0x680
+#define BGX_CMRX_TX_STAT17 0x688
+#define BGX_CMR_TX_LMACS 0x1000
+
+#define BGX_SPUX_CONTROL1 0x10000
+#define SPU_CTL_LOW_POWER BIT_ULL(11)
+#define SPU_CTL_RESET BIT_ULL(15)
+#define BGX_SPUX_STATUS1 0x10008
+#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
+#define BGX_SPUX_STATUS2 0x10020
+#define SPU_STATUS2_RCVFLT BIT_ULL(10)
+#define BGX_SPUX_BX_STATUS 0x10028
+#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12)
+#define BGX_SPUX_BR_STATUS1 0x10030
+#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0)
+#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12)
+#define BGX_SPUX_BR_PMD_CRTL 0x10068
+#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1)
+#define BGX_SPUX_BR_PMD_LP_CUP 0x10078
+#define BGX_SPUX_BR_PMD_LD_CUP 0x10088
+#define BGX_SPUX_BR_PMD_LD_REP 0x10090
+#define BGX_SPUX_FEC_CONTROL 0x100A0
+#define SPU_FEC_CTL_FEC_EN BIT_ULL(0)
+#define SPU_FEC_CTL_ERR_EN BIT_ULL(1)
+#define BGX_SPUX_AN_CONTROL 0x100C8
+#define SPU_AN_CTL_AN_EN BIT_ULL(12)
+#define SPU_AN_CTL_XNP_EN BIT_ULL(13)
+#define BGX_SPUX_AN_ADV 0x100D8
+#define BGX_SPUX_MISC_CONTROL 0x10218
+#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10)
+#define SPU_MISC_CTL_RX_DIS BIT_ULL(12)
+#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */
+#define BGX_SPUX_INT_W1S 0x10228
+#define BGX_SPUX_INT_ENA_W1C 0x10230
+#define BGX_SPUX_INT_ENA_W1S 0x10238
+#define BGX_SPU_DBG_CONTROL 0x10300
+#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18)
+#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
+
+#define BGX_SMUX_RX_INT 0x20000
+#define BGX_SMUX_RX_JABBER 0x20030
+#define BGX_SMUX_RX_CTL 0x20048
+#define SMU_RX_CTL_STATUS (3ull << 0)
+#define BGX_SMUX_TX_APPEND 0x20100
+#define SMU_TX_APPEND_FCS_D BIT_ULL(2)
+#define BGX_SMUX_TX_MIN_PKT 0x20118
+#define BGX_SMUX_TX_INT 0x20140
+#define BGX_SMUX_TX_CTL 0x20178
+#define SMU_TX_CTL_DIC_EN BIT_ULL(0)
+#define SMU_TX_CTL_UNI_EN BIT_ULL(1)
+#define SMU_TX_CTL_LNK_STATUS (3ull << 4)
+#define BGX_SMUX_TX_THRESH 0x20180
+#define BGX_SMUX_CTL 0x20200
+#define SMU_CTL_RX_IDLE BIT_ULL(0)
+#define SMU_CTL_TX_IDLE BIT_ULL(1)
+
+#define BGX_GMP_PCS_MRX_CTL 0x30000
+#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
+#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
+#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
+#define PCS_MRX_CTL_RESET BIT_ULL(15)
+#define BGX_GMP_PCS_MRX_STATUS 0x30008
+#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
+#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
+#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
+#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
+#define BGX_GMP_GMI_PRTX_CFG 0x38020
+#define GMI_PORT_CFG_SPEED BIT_ULL(1)
+#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
+#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
+#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
+#define BGX_GMP_GMI_RXX_JABBER 0x38038
+#define BGX_GMP_GMI_TXX_THRESH 0x38210
+#define BGX_GMP_GMI_TXX_APPEND 0x38218
+#define BGX_GMP_GMI_TXX_SLOT 0x38220
+#define BGX_GMP_GMI_TXX_BURST 0x38228
+#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
+#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
+
+#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
+#define BGX_MSIX_VEC_0_29_CTL 0x400008
+#define BGX_MSIX_PBA_0 0x4F0000
+
+/* MSI-X interrupts */
+#define BGX_MSIX_VECTORS 30
+#define BGX_LMAC_VEC_OFFSET 7
+#define BGX_MSIX_VEC_SHIFT 4
+
+#define CMRX_INT 0
+#define SPUX_INT 1
+#define SMUX_RX_INT 2
+#define SMUX_TX_INT 3
+#define GMPX_PCS_INT 4
+#define GMPX_GMI_RX_INT 5
+#define GMPX_GMI_TX_INT 6
+#define CMR_MEM_INT 28
+#define SPU_MEM_INT 29
+
+#define LMAC_INTR_LINK_UP BIT(0)
+#define LMAC_INTR_LINK_DOWN BIT(1)
+
+/* RX_DMAC_CTL configuration */
+enum MCAST_MODE {
+ MCAST_MODE_REJECT,
+ MCAST_MODE_ACCEPT,
+ MCAST_MODE_CAM_FILTER,
+ RSVD
+};
+
+#define BCAST_ACCEPT 1
+#define CAM_ACCEPT 1
+
+void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
+unsigned bgx_get_map(int node);
+int bgx_get_lmac_count(int node, int bgx);
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
+#define BGX_RX_STATS_COUNT 11
+#define BGX_TX_STATS_COUNT 18
+
+struct bgx_stats {
+ u64 rx_stats[BGX_RX_STATS_COUNT];
+ u64 tx_stats[BGX_TX_STATS_COUNT];
+};
+
+enum LMAC_TYPE {
+ BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */
+ BGX_MODE_XAUI = 1, /* 4 lanes, 3.125 Gbaud */
+ BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */
+ BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */
+ BGX_MODE_XFI = 3, /* 1 lane, 10.3125 Gbaud */
+ BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
+	BGX_MODE_10G_KR = 3, /* 1 lane, 10.3125 Gbaud */
+	BGX_MODE_40G_KR = 4, /* 4 lanes, 10.3125 Gbaud */
+};
+
+enum qlm_mode {
+ QLM_MODE_SGMII, /* SGMII, each lane independent */
+ QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */
+ QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */
+ QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */
+ QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */
+ QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */
+ QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */
+};
+
+#endif /* THUNDER_BGX_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 524d11098c56..2e70228272b0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -46,17 +46,19 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
#include <asm/io.h>
#include "cxgb4_uld.h"
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
enum {
- MAX_NPORTS = 4, /* max # of ports */
- SERNUM_LEN = 24, /* Serial # length */
- EC_LEN = 16, /* E/C length */
- ID_LEN = 16, /* ID length */
- PN_LEN = 16, /* Part Number length */
+ MAX_NPORTS = 4, /* max # of ports */
+ SERNUM_LEN = 24, /* Serial # length */
+ EC_LEN = 16, /* E/C length */
+ ID_LEN = 16, /* ID length */
+ PN_LEN = 16, /* Part Number length */
+ MACADDR_LEN = 12, /* MAC Address length */
};
enum {
@@ -198,23 +200,45 @@ struct lb_port_stats {
};
struct tp_tcp_stats {
- u32 tcpOutRsts;
- u64 tcpInSegs;
- u64 tcpOutSegs;
- u64 tcpRetransSegs;
+ u32 tcp_out_rsts;
+ u64 tcp_in_segs;
+ u64 tcp_out_segs;
+ u64 tcp_retrans_segs;
+};
+
+struct tp_usm_stats {
+ u32 frames;
+ u32 drops;
+ u64 octets;
+};
+
+struct tp_fcoe_stats {
+ u32 frames_ddp;
+ u32 frames_drop;
+ u64 octets_ddp;
};
struct tp_err_stats {
- u32 macInErrs[4];
- u32 hdrInErrs[4];
- u32 tcpInErrs[4];
- u32 tnlCongDrops[4];
- u32 ofldChanDrops[4];
- u32 tnlTxDrops[4];
- u32 ofldVlanDrops[4];
- u32 tcp6InErrs[4];
- u32 ofldNoNeigh;
- u32 ofldCongDefer;
+ u32 mac_in_errs[4];
+ u32 hdr_in_errs[4];
+ u32 tcp_in_errs[4];
+ u32 tnl_cong_drops[4];
+ u32 ofld_chan_drops[4];
+ u32 tnl_tx_drops[4];
+ u32 ofld_vlan_drops[4];
+ u32 tcp6_in_errs[4];
+ u32 ofld_no_neigh;
+ u32 ofld_cong_defer;
+};
+
+struct tp_cpl_stats {
+ u32 req[4];
+ u32 rsp[4];
+};
+
+struct tp_rdma_stats {
+ u32 rqe_dfr_pkt;
+ u32 rqe_dfr_mod;
};
struct sge_params {
@@ -224,7 +248,6 @@ struct sge_params {
};
struct tp_params {
- unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
unsigned int la_mask; /* what events are recorded by TP LA */
unsigned short tx_modq_map; /* TX modulation scheduler queue to */
@@ -259,6 +282,7 @@ struct vpd_params {
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
u8 pn[PN_LEN + 1];
+ u8 na[MACADDR_LEN + 1];
};
struct pci_params {
@@ -273,6 +297,7 @@ struct pci_params {
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
enum chip_type {
T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -284,6 +309,10 @@ enum chip_type {
T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
+
+ T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+ T6_FIRST_REV = T6_A0,
+ T6_LAST_REV = T6_A0,
};
struct devlog_params {
@@ -292,6 +321,15 @@ struct devlog_params {
u32 size; /* size of log */
};
+/* Stores chip specific parameters */
+struct arch_specific_params {
+ u8 nchan;
+ u16 mps_rplc_size;
+ u16 vfcount;
+ u32 sge_fl_db;
+ u16 mps_tcam_size;
+};
+
struct adapter_params {
struct sge_params sge;
struct tp_params tp;
@@ -317,6 +355,7 @@ struct adapter_params {
unsigned char nports; /* # of ethernet ports */
unsigned char portvec;
enum chip_type chip; /* chip code */
+ struct arch_specific_params arch; /* chip specific params */
unsigned char offload;
unsigned char bypass;
@@ -328,6 +367,17 @@ struct adapter_params {
unsigned int max_ird_adapter; /* Max read depth per adapter */
};
+/* State needed to monitor the forward progress of SGE Ingress DMA activities
+ * and detect possible hangs; driven by t4_idma_monitor_init() and
+ * t4_idma_monitor(), declared near the end of this header.
+ */
+struct sge_idma_monitor_state {
+ unsigned int idma_1s_thresh; /* 1s threshold in Core Clock ticks */
+ unsigned int idma_stalled[2]; /* synthesized stalled timers in HZ */
+ unsigned int idma_state[2]; /* IDMA Hang detect state */
+ unsigned int idma_qid[2]; /* IDMA Hung Ingress Queue ID */
+ unsigned int idma_warn[2]; /* time to warning in HZ */
+};
+
#include "t4fw_api.h"
#define FW_VERSION(chip) ( \
@@ -421,6 +471,7 @@ struct port_info {
u8 rss_mode;
struct link_config link_cfg;
u16 *rss;
+ struct port_stats stats_base;
#ifdef CONFIG_CHELSIO_T4_DCB
struct port_dcb_info dcb; /* Data Center Bridging support */
#endif
@@ -630,12 +681,7 @@ struct sge {
u32 fl_align; /* response queue message alignment */
u32 fl_starve_thres; /* Free List starvation threshold */
- /* State variables for detecting an SGE Ingress DMA hang */
- unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
- unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
- unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
- unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
-
+ struct sge_idma_monitor_state idma_monitor;
unsigned int egr_start;
unsigned int egr_sz;
unsigned int ingr_start;
@@ -644,6 +690,7 @@ struct sge {
struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
unsigned long *starving_fl;
unsigned long *txq_maperr;
+ unsigned long *blocked_fl;
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
};
@@ -665,6 +712,12 @@ struct l2t_data;
#endif
+struct doorbell_stats {
+ u32 db_drop;
+ u32 db_empty;
+ u32 db_full;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -672,7 +725,7 @@ struct adapter {
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned int mbox;
- unsigned int fn;
+ unsigned int pf;
unsigned int flags;
enum chip_type chip;
@@ -682,13 +735,12 @@ struct adapter {
struct cxgb4_virt_res vres;
unsigned int swintr;
- unsigned int wol;
-
struct {
unsigned short vec;
char desc[IFNAMSIZ + 10];
} msix_info[MAX_INGQ + 1];
+ struct doorbell_stats db_stats;
struct sge sge;
struct net_device *port[MAX_NPORTS];
@@ -843,6 +895,16 @@ enum {
VLAN_REWRITE
};
+static inline int is_offload(const struct adapter *adap)
+{
+ return adap->params.offload;
+}
+
+static inline int is_t6(enum chip_type chip)
+{
+ return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6;
+}
+
static inline int is_t5(enum chip_type chip)
{
return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
@@ -887,6 +949,22 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
}
/**
+ * t4_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW. Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
+ u8 hw_addr[])
+{
+ ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
+ ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
+}
+
+/**
* netdev2pinfo - return the port_info structure associated with a net_device
* @dev: the netdev
*
@@ -1055,7 +1133,7 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
- struct sge_fl *fl, rspq_handler_t hnd);
+ struct sge_fl *fl, rspq_handler_t hnd, int cong);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *netdevq,
unsigned int iqid);
@@ -1095,6 +1173,19 @@ static inline int is_bypass_device(int device)
}
}
+static inline int is_10gbt_device(int device)
+{
+ /* this should be set based upon device capabilities */
+ switch (device) {
+ case 0x4409:
+ case 0x4486:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
return adap->params.vpd.cclk / 1000;
@@ -1117,9 +1208,19 @@ static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
u32 val);
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout);
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok);
+static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
+ const void *cmd, int size, void *rpl,
+ int timeout)
+{
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
+ timeout);
+}
+
static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
int size, void *rpl)
{
@@ -1147,10 +1248,14 @@ void t4_intr_disable(struct adapter *adapter);
int t4_slow_intr_handler(struct adapter *adapter);
int t4_wait_dev_ready(void __iomem *regs);
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc);
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
+u32 t4_get_util_window(struct adapter *adap);
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);
+
#define T4_MEMORY_WRITE 0
#define T4_MEMORY_READ 1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
@@ -1165,10 +1270,16 @@ unsigned int t4_get_regs_len(struct adapter *adapter);
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_load_phy_fw(struct adapter *adap,
+ int win, spinlock_t *lock,
+ int (*phy_fw_version)(const u8 *, size_t),
+ const u8 *phy_fw_data, size_t phy_fw_size);
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
const u8 *fw_data, unsigned int size, int force);
@@ -1182,7 +1293,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
int t4_prep_adapter(struct adapter *adapter);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qid,
enum t4_bar2_qtype qtype,
u64 *pbar2_qoffset,
@@ -1195,12 +1306,15 @@ int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
+int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
unsigned int flags);
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq);
int t4_read_rss(struct adapter *adapter, u16 *entries);
void t4_read_rss_key(struct adapter *adapter, u32 *key);
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
@@ -1211,10 +1325,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
u32 t4_read_rss_pf_map(struct adapter *adapter);
u32 t4_read_rss_pf_mask(struct adapter *adapter);
-int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
- u64 *parity);
-int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
- u64 *parity);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
@@ -1226,21 +1337,36 @@ int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
const unsigned int *valp);
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+ unsigned int *pif_req_wrptr,
+ unsigned int *pif_rsp_wrptr);
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset);
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val);
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+ struct tp_fcoe_stats *st);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
@@ -1259,13 +1385,16 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val);
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val, int rw);
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ const u32 *val, int timeout);
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
const u32 *val);
-int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
- unsigned int pf, unsigned int vf,
- unsigned int nparams, const u32 *params,
- const u32 *val);
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
unsigned int rxqi, unsigned int rxq, unsigned int tc,
@@ -1274,6 +1403,9 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int viid);
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
@@ -1303,6 +1435,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
@@ -1310,4 +1443,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
void t4_sge_decode_idma_state(struct adapter *adapter, int state);
void t4_free_mem(void *addr);
+void t4_idma_monitor_init(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma);
+void t4_idma_monitor(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma,
+ int hz, int ticks);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 371f75e782e5..484eb8c37489 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -182,6 +182,95 @@ static const struct file_operations cim_la_fops = {
.release = seq_release_private
};
+static int cim_pif_la_show(struct seq_file *seq, void *v, int idx)
+{
+ const u32 *p = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Cntl ID DataBE Addr Data\n");
+ } else if (idx < CIM_PIFLA_SIZE) {
+ seq_printf(seq, " %02x %02x %04x %08x %08x%08x%08x%08x\n",
+ (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f,
+ p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]);
+ } else {
+ if (idx == CIM_PIFLA_SIZE)
+ seq_puts(seq, "\nCntl ID Data\n");
+ seq_printf(seq, " %02x %02x %08x%08x%08x%08x\n",
+ (p[4] >> 6) & 0xff, p[4] & 0x3f,
+ p[3], p[2], p[1], p[0]);
+ }
+ return 0;
+}
+
+static int cim_pif_la_open(struct inode *inode, struct file *file)
+{
+ struct seq_tab *p;
+ struct adapter *adap = inode->i_private;
+
+ p = seq_open_tab(file, 2 * CIM_PIFLA_SIZE, 6 * sizeof(u32), 1,
+ cim_pif_la_show);
+ if (!p)
+ return -ENOMEM;
+
+ t4_cim_read_pif_la(adap, (u32 *)p->data,
+ (u32 *)p->data + 6 * CIM_PIFLA_SIZE, NULL, NULL);
+ return 0;
+}
+
+static const struct file_operations cim_pif_la_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_pif_la_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
+
+static int cim_ma_la_show(struct seq_file *seq, void *v, int idx)
+{
+ const u32 *p = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "\n");
+ } else if (idx < CIM_MALA_SIZE) {
+ seq_printf(seq, "%02x%08x%08x%08x%08x\n",
+ p[4], p[3], p[2], p[1], p[0]);
+ } else {
+ if (idx == CIM_MALA_SIZE)
+ seq_puts(seq,
+ "\nCnt ID Tag UE Data RDY VLD\n");
+ seq_printf(seq, "%3u %2u %x %u %08x%08x %u %u\n",
+ (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
+ (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
+ (p[1] >> 2) | ((p[2] & 3) << 30),
+ (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
+ p[0] & 1);
+ }
+ return 0;
+}
+
+static int cim_ma_la_open(struct inode *inode, struct file *file)
+{
+ struct seq_tab *p;
+ struct adapter *adap = inode->i_private;
+
+ p = seq_open_tab(file, 2 * CIM_MALA_SIZE, 5 * sizeof(u32), 1,
+ cim_ma_la_show);
+ if (!p)
+ return -ENOMEM;
+
+ t4_cim_read_ma_la(adap, (u32 *)p->data,
+ (u32 *)p->data + 5 * CIM_MALA_SIZE);
+ return 0;
+}
+
+static const struct file_operations cim_ma_la_fops = {
+ .owner = THIS_MODULE,
+ .open = cim_ma_la_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private
+};
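Both LA dumps above lean on the file-local seq_tab helpers; a hedged summary of the contract as it is used here (the real definitions live elsewhere in cxgb4_debugfs.c and are not part of this hunk):

/*
 *   p = seq_open_tab(file, rows, width, have_header, show);
 *
 * allocates rows * width bytes at p->data; the seq_file core then
 * calls show(seq, SEQ_START_TOKEN, 0) once for the header and
 * show(seq, p->data + idx * width, idx) once per row, which is why
 * the open handlers above fill p->data from hardware in one shot.
 */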
+
static int cim_qcfg_show(struct seq_file *seq, void *v)
{
static const char * const qname[] = {
@@ -663,6 +752,39 @@ static const struct file_operations pm_stats_debugfs_fops = {
.write = pm_stats_clear
};
+static int tx_rate_show(struct seq_file *seq, void *v)
+{
+ u64 nrate[NCHAN], orate[NCHAN];
+ struct adapter *adap = seq->private;
+
+ t4_get_chan_txrate(adap, nrate, orate);
+ if (adap->params.arch.nchan == NCHAN) {
+ seq_puts(seq, " channel 0 channel 1 "
+ "channel 2 channel 3\n");
+ seq_printf(seq, "NIC B/s: %10llu %10llu %10llu %10llu\n",
+ (unsigned long long)nrate[0],
+ (unsigned long long)nrate[1],
+ (unsigned long long)nrate[2],
+ (unsigned long long)nrate[3]);
+ seq_printf(seq, "Offload B/s: %10llu %10llu %10llu %10llu\n",
+ (unsigned long long)orate[0],
+ (unsigned long long)orate[1],
+ (unsigned long long)orate[2],
+ (unsigned long long)orate[3]);
+ } else {
+ seq_puts(seq, " channel 0 channel 1\n");
+ seq_printf(seq, "NIC B/s: %10llu %10llu\n",
+ (unsigned long long)nrate[0],
+ (unsigned long long)nrate[1]);
+ seq_printf(seq, "Offload B/s: %10llu %10llu\n",
+ (unsigned long long)orate[0],
+ (unsigned long long)orate[1]);
+ }
+ return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate);
+
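The tx_rate_debugfs_fops named in the debugfs table later in this patch comes from this macro; a minimal sketch of the likely expansion, assuming the usual single_open() pattern used for the other simple nodes in this file:

static int tx_rate_open(struct inode *inode, struct file *file)
{
	return single_open(file, tx_rate_show, inode->i_private);
}

static const struct file_operations tx_rate_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = tx_rate_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};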
static int cctrl_tbl_show(struct seq_file *seq, void *v)
{
static const char * const dec_fac[] = {
@@ -1084,41 +1206,89 @@ static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
static int mps_tcam_show(struct seq_file *seq, void *v)
{
- if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF"
- " VF Replication "
- "P0 P1 P2 P3 ML\n");
- else {
+ struct adapter *adap = seq->private;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ if (v == SEQ_START_TOKEN) {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF "
+ "Replication "
+ " P0 P1 P2 P3 ML\n");
+ else
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF Replication"
+ " P0 P1 P2 P3 ML\n");
+ } else {
u64 mask;
u8 addr[ETH_ALEN];
- struct adapter *adap = seq->private;
+ bool replicate;
unsigned int idx = (uintptr_t)v - 2;
- u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
- u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
- u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
- u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
- u32 rplc[4] = {0, 0, 0, 0};
+ u64 tcamy, tcamx, val;
+ u32 cls_lo, cls_hi, ctl;
+ u32 rplc[8] = {0};
+
+ if (chip_ver > CHELSIO_T5) {
+ /* CtlCmdType - 0: Read, 1: Write
+ * CtlTcamSel - 0: TCAM0, 1: TCAM1
+ * CtlXYBitSel- 0: Y bit, 1: X bit
+ */
+
+ /* Read tcamy */
+ ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
+ if (idx < 256)
+ ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
+ else
+ ctl |= CTLTCAMINDEX_V(idx - 256) |
+ CTLTCAMSEL_V(1);
+ t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+ val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+ tcamy = DMACH_G(val) << 32;
+ tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+
+ /* Read tcamx. Change the control param */
+ ctl |= CTLXYBITSEL_V(1);
+ t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+ val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+ tcamx = DMACH_G(val) << 32;
+ tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+ } else {
+ tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
+ tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
+ }
+
+ cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
+ cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
if (tcamx & tcamy) {
seq_printf(seq, "%3u -\n", idx);
goto out;
}
- if (cls_lo & REPLICATE_F) {
+ rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0;
+ if (chip_ver > CHELSIO_T5)
+ replicate = (cls_lo & T6_REPLICATE_F);
+ else
+ replicate = (cls_lo & REPLICATE_F);
+
+ if (replicate) {
struct fw_ldst_cmd ldst_cmd;
int ret;
+ struct fw_ldst_mps_rplc mps_rplc;
+ u32 ldst_addrspc;
memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_addrspc =
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS);
ldst_cmd.op_to_addrspace =
htonl(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F |
FW_CMD_READ_F |
- FW_LDST_CMD_ADDRSPACE_V(
- FW_LDST_ADDRSPC_MPS));
+ ldst_addrspc);
ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
- ldst_cmd.u.mps.fid_ctl =
+ ldst_cmd.u.mps.rplc.fid_idx =
htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
- FW_LDST_CMD_CTL_V(idx));
+ FW_LDST_CMD_IDX_V(idx));
ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
sizeof(ldst_cmd), &ldst_cmd);
if (ret)
@@ -1126,30 +1296,69 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
"replication map for idx %d: %d\n",
idx, -ret);
else {
- rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
- rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
- rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
- rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
+ mps_rplc = ldst_cmd.u.mps.rplc;
+ rplc[0] = ntohl(mps_rplc.rplc31_0);
+ rplc[1] = ntohl(mps_rplc.rplc63_32);
+ rplc[2] = ntohl(mps_rplc.rplc95_64);
+ rplc[3] = ntohl(mps_rplc.rplc127_96);
+ if (adap->params.arch.mps_rplc_size > 128) {
+ rplc[4] = ntohl(mps_rplc.rplc159_128);
+ rplc[5] = ntohl(mps_rplc.rplc191_160);
+ rplc[6] = ntohl(mps_rplc.rplc223_192);
+ rplc[7] = ntohl(mps_rplc.rplc255_224);
+ }
}
}
tcamxy2valmask(tcamx, tcamy, addr, &mask);
- seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
- "%3c %#x%4u%4d",
- idx, addr[0], addr[1], addr[2], addr[3], addr[4],
- addr[5], (unsigned long long)mask,
- (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
- PF_G(cls_lo),
- (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
- if (cls_lo & REPLICATE_F)
- seq_printf(seq, " %08x %08x %08x %08x",
- rplc[3], rplc[2], rplc[1], rplc[0]);
+ if (chip_ver > CHELSIO_T5)
+ seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx%3c %#x%4u%4d",
+ idx, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], (unsigned long long)mask,
+ (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ T6_PF_G(cls_lo),
+ (cls_lo & T6_VF_VALID_F) ?
+ T6_VF_G(cls_lo) : -1);
else
- seq_printf(seq, "%36c", ' ');
- seq_printf(seq, "%4u%3u%3u%3u %#x\n",
- SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
- SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
- (cls_lo >> MULTILISTEN0_S) & 0xf);
+ seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx%3c %#x%4u%4d",
+ idx, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], (unsigned long long)mask,
+ (cls_lo & SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ PF_G(cls_lo),
+ (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
+
+ if (replicate) {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_printf(seq, " %08x %08x %08x %08x "
+ "%08x %08x %08x %08x",
+ rplc[7], rplc[6], rplc[5], rplc[4],
+ rplc[3], rplc[2], rplc[1], rplc[0]);
+ else
+ seq_printf(seq, " %08x %08x %08x %08x",
+ rplc[3], rplc[2], rplc[1], rplc[0]);
+ } else {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_printf(seq, "%72c", ' ');
+ else
+ seq_printf(seq, "%36c", ' ');
+ }
+
+ if (chip_ver > CHELSIO_T5)
+ seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+ T6_SRAM_PRIO0_G(cls_lo),
+ T6_SRAM_PRIO1_G(cls_lo),
+ T6_SRAM_PRIO2_G(cls_lo),
+ T6_SRAM_PRIO3_G(cls_lo),
+ (cls_lo >> T6_MULTILISTEN0_S) & 0xf);
+ else
+ seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+ SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
+ SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
+ (cls_lo >> MULTILISTEN0_S) & 0xf);
}
out: return 0;
}
@@ -1222,7 +1431,7 @@ static int sensors_show(struct seq_file *seq, void *v)
param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
param, val);
if (ret < 0 || val[0] == 0)
@@ -1416,6 +1625,9 @@ static int rss_config_show(struct seq_file *seq, void *v)
seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf));
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf));
+ else
+ seq_printf(seq, " VfWrAddr: %3d\n",
+ T6_VFWRADDR_G(rssconf));
seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]);
seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F));
seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F));
@@ -1634,14 +1846,14 @@ static int rss_vf_config_open(struct inode *inode, struct file *file)
struct adapter *adapter = inode->i_private;
struct seq_tab *p;
struct rss_vf_conf *vfconf;
- int vf;
+ int vf, vfcount = adapter->params.arch.vfcount;
- p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show);
+ p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show);
if (!p)
return -ENOMEM;
vfconf = (struct rss_vf_conf *)p->data;
- for (vf = 0; vf < 128; vf++) {
+ for (vf = 0; vf < vfcount; vf++) {
t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
&vfconf[vf].rss_vf_vfh);
}
@@ -1959,6 +2171,61 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
size_mb << 20);
}
+static int blocked_fl_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int len;
+ const struct adapter *adap = filp->private_data;
+ char *buf;
+ ssize_t size = (adap->sge.egr_sz + 3) / 4 +
+ adap->sge.egr_sz / 32 + 2; /* includes ,/\n/\0 */
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, size - 1, "%*pb\n",
+ adap->sge.egr_sz, adap->sge.blocked_fl);
+ len += sprintf(buf + len, "\n");
+ size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
+ t4_free_mem(buf);
+ return size;
+}
+
+static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int err;
+ unsigned long *t;
+ struct adapter *adap = filp->private_data;
+
+ t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
+ if (err) {
+ t4_free_mem(t);
+ return err;
+ }
+
+ bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
+ t4_free_mem(t);
+ return count;
+}
+
+static const struct file_operations blocked_fl_fops = {
+ .owner = THIS_MODULE,
+ .open = blocked_fl_open,
+ .read = blocked_fl_read,
+ .write = blocked_fl_write,
+ .llseek = generic_file_llseek,
+};
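A hypothetical userspace round trip for the new node (the debugfs path and bitmap value are illustrative only; writes must use the comma-separated hex format that bitmap_parse_user() accepts):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/cxgb4/0000:01:00.4/blocked_fl",
		      O_RDWR);

	if (fd < 0)
		return 1;

	/* The read side prints the bitmap with "%*pb", e.g.
	 * "0,00000000,00000005" for a wide egress map.
	 */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("blocked free lists: %s", buf);
	}

	/* Block free lists 0 and 2; the write handler parses the same
	 * hex bitmap format.
	 */
	write(fd, "5", 1);
	close(fd);
	return 0;
}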
+
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -1978,11 +2245,13 @@ void add_debugfs_files(struct adapter *adap,
int t4_setup_debugfs(struct adapter *adap)
{
int i;
- u32 size;
+ u32 size = 0;
struct dentry *de;
static struct t4_debugfs_entry t4_debugfs_files[] = {
{ "cim_la", &cim_la_fops, S_IRUSR, 0 },
+ { "cim_pif_la", &cim_pif_la_fops, S_IRUSR, 0 },
+ { "cim_ma_la", &cim_ma_la_fops, S_IRUSR, 0 },
{ "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
{ "clk", &clk_debugfs_fops, S_IRUSR, 0 },
{ "devlog", &devlog_fops, S_IRUSR, 0 },
@@ -2018,10 +2287,12 @@ int t4_setup_debugfs(struct adapter *adap)
{ "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 },
{ "sensors", &sensors_debugfs_fops, S_IRUSR, 0 },
{ "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 },
+ { "tx_rate", &tx_rate_debugfs_fops, S_IRUSR, 0 },
{ "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 },
#if IS_ENABLED(CONFIG_IPV6)
{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
#endif
+ { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
@@ -2048,12 +2319,7 @@ int t4_setup_debugfs(struct adapter *adap)
size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size));
}
- if (is_t4(adap->params.chip)) {
- size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
- if (i & EXT_MEM_ENABLE_F)
- add_debugfs_mem(adap, "mc", MEM_MC,
- EXT_MEM_SIZE_G(size));
- } else {
+ if (is_t5(adap->params.chip)) {
if (i & EXT_MEM0_ENABLE_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
add_debugfs_mem(adap, "mc0", MEM_MC0,
@@ -2064,6 +2330,11 @@ int t4_setup_debugfs(struct adapter *adap)
add_debugfs_mem(adap, "mc1", MEM_MC1,
EXT_MEM1_SIZE_G(size));
}
+ } else {
+ if (i & EXT_MEM_ENABLE_F) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_G(size));
+ }
}
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 10d82b51d7ef..687acf71fa15 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -108,15 +108,82 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"VLANinsertions ",
"GROpackets ",
"GROmerged ",
- "WriteCoalSuccess ",
- "WriteCoalFail ",
+};
+
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+ "db_drop ",
+ "db_full ",
+ "db_empty ",
+ "tcp_ipv4_out_rsts ",
+ "tcp_ipv4_in_segs ",
+ "tcp_ipv4_out_segs ",
+ "tcp_ipv4_retrans_segs ",
+ "tcp_ipv6_out_rsts ",
+ "tcp_ipv6_in_segs ",
+ "tcp_ipv6_out_segs ",
+ "tcp_ipv6_retrans_segs ",
+ "usm_ddp_frames ",
+ "usm_ddp_octets ",
+ "usm_ddp_drops ",
+ "rdma_no_rqe_mod_defer ",
+ "rdma_no_rqe_pkt_defer ",
+ "tp_err_ofld_no_neigh ",
+ "tp_err_ofld_cong_defer ",
+ "write_coal_success ",
+ "write_coal_fail ",
+};
+
+static char channel_stats_strings[][ETH_GSTRING_LEN] = {
+ "--------Channel--------- ",
+ "tp_cpl_requests ",
+ "tp_cpl_responses ",
+ "tp_mac_in_errs ",
+ "tp_hdr_in_errs ",
+ "tp_tcp_in_errs ",
+ "tp_tcp6_in_errs ",
+ "tp_tnl_cong_drops ",
+ "tp_tnl_tx_drops ",
+ "tp_ofld_vlan_drops ",
+ "tp_ofld_chan_drops ",
+ "fcoe_octets_ddp ",
+ "fcoe_frames_ddp ",
+ "fcoe_frames_drop ",
+};
+
+static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
+ "-------Loopback----------- ",
+ "octets_ok ",
+ "frames_ok ",
+ "bcast_frames ",
+ "mcast_frames ",
+ "ucast_frames ",
+ "error_frames ",
+ "frames_64 ",
+ "frames_65_to_127 ",
+ "frames_128_to_255 ",
+ "frames_256_to_511 ",
+ "frames_512_to_1023 ",
+ "frames_1024_to_1518 ",
+ "frames_1519_to_max ",
+ "frames_dropped ",
+ "bg0_frames_dropped ",
+ "bg1_frames_dropped ",
+ "bg2_frames_dropped ",
+ "bg3_frames_dropped ",
+ "bg0_frames_trunc ",
+ "bg1_frames_trunc ",
+ "bg2_frames_trunc ",
+ "bg3_frames_trunc ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(stats_strings);
+ return ARRAY_SIZE(stats_strings) +
+ ARRAY_SIZE(adapter_stats_strings) +
+ ARRAY_SIZE(channel_stats_strings) +
+ ARRAY_SIZE(loopback_stats_strings);
default:
return -EOPNOTSUPP;
}
@@ -168,8 +235,18 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS) {
memcpy(data, stats_strings, sizeof(stats_strings));
+ data += sizeof(stats_strings);
+ memcpy(data, adapter_stats_strings,
+ sizeof(adapter_stats_strings));
+ data += sizeof(adapter_stats_strings);
+ memcpy(data, channel_stats_strings,
+ sizeof(channel_stats_strings));
+ data += sizeof(channel_stats_strings);
+ memcpy(data, loopback_stats_strings,
+ sizeof(loopback_stats_strings));
+ }
}
/* port stats maintained per queue of the port. They should be in the same
@@ -185,6 +262,45 @@ struct queue_port_stats {
u64 gro_merged;
};
+struct adapter_stats {
+ u64 db_drop;
+ u64 db_full;
+ u64 db_empty;
+ u64 tcp_v4_out_rsts;
+ u64 tcp_v4_in_segs;
+ u64 tcp_v4_out_segs;
+ u64 tcp_v4_retrans_segs;
+ u64 tcp_v6_out_rsts;
+ u64 tcp_v6_in_segs;
+ u64 tcp_v6_out_segs;
+ u64 tcp_v6_retrans_segs;
+ u64 frames;
+ u64 octets;
+ u64 drops;
+ u64 rqe_dfr_mod;
+ u64 rqe_dfr_pkt;
+ u64 ofld_no_neigh;
+ u64 ofld_cong_defer;
+ u64 wc_success;
+ u64 wc_fail;
+};
+
+struct channel_stats {
+ u64 cpl_req;
+ u64 cpl_rsp;
+ u64 mac_in_errs;
+ u64 hdr_in_errs;
+ u64 tcp_in_errs;
+ u64 tcp6_in_errs;
+ u64 tnl_cong_drops;
+ u64 tnl_tx_drops;
+ u64 ofld_vlan_drops;
+ u64 ofld_chan_drops;
+ u64 octets_ddp;
+ u64 frames_ddp;
+ u64 frames_drop;
+};
+
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
@@ -205,30 +321,121 @@ static void collect_sge_port_stats(const struct adapter *adap,
}
}
+static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
+{
+ struct tp_tcp_stats v4, v6;
+ struct tp_rdma_stats rdma_stats;
+ struct tp_err_stats err_stats;
+ struct tp_usm_stats usm_stats;
+ u64 val1, val2;
+
+ memset(s, 0, sizeof(*s));
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_tcp_stats(adap, &v4, &v6);
+ t4_tp_get_rdma_stats(adap, &rdma_stats);
+ t4_get_usm_stats(adap, &usm_stats);
+ t4_tp_get_err_stats(adap, &err_stats);
+ spin_unlock(&adap->stats_lock);
+
+ s->db_drop = adap->db_stats.db_drop;
+ s->db_full = adap->db_stats.db_full;
+ s->db_empty = adap->db_stats.db_empty;
+
+ s->tcp_v4_out_rsts = v4.tcp_out_rsts;
+ s->tcp_v4_in_segs = v4.tcp_in_segs;
+ s->tcp_v4_out_segs = v4.tcp_out_segs;
+ s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
+ s->tcp_v6_out_rsts = v6.tcp_out_rsts;
+ s->tcp_v6_in_segs = v6.tcp_in_segs;
+ s->tcp_v6_out_segs = v6.tcp_out_segs;
+ s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
+
+ if (is_offload(adap)) {
+ s->frames = usm_stats.frames;
+ s->octets = usm_stats.octets;
+ s->drops = usm_stats.drops;
+ s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
+ s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
+ }
+
+ s->ofld_no_neigh = err_stats.ofld_no_neigh;
+ s->ofld_cong_defer = err_stats.ofld_cong_defer;
+
+ if (!is_t4(adap->params.chip)) {
+ int v;
+
+ v = t4_read_reg(adap, SGE_STAT_CFG_A);
+ if (STATSOURCE_T5_G(v) == 7) {
+ val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
+ val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
+ s->wc_success = val1 - val2;
+ s->wc_fail = val2;
+ }
+ }
+}
+
+static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
+ u8 i)
+{
+ struct tp_cpl_stats cpl_stats;
+ struct tp_err_stats err_stats;
+ struct tp_fcoe_stats fcoe_stats;
+
+ memset(s, 0, sizeof(*s));
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_cpl_stats(adap, &cpl_stats);
+ t4_tp_get_err_stats(adap, &err_stats);
+ t4_get_fcoe_stats(adap, i, &fcoe_stats);
+ spin_unlock(&adap->stats_lock);
+
+ s->cpl_req = cpl_stats.req[i];
+ s->cpl_rsp = cpl_stats.rsp[i];
+ s->mac_in_errs = err_stats.mac_in_errs[i];
+ s->hdr_in_errs = err_stats.hdr_in_errs[i];
+ s->tcp_in_errs = err_stats.tcp_in_errs[i];
+ s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
+ s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
+ s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
+ s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
+ s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
+ s->octets_ddp = fcoe_stats.octets_ddp;
+ s->frames_ddp = fcoe_stats.frames_ddp;
+ s->frames_drop = fcoe_stats.frames_drop;
+}
+
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- u32 val1, val2;
+ struct lb_port_stats s;
+ int i;
+ u64 *p0;
- t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+ t4_get_port_stats_offset(adapter, pi->tx_chan,
+ (struct port_stats *)data,
+ &pi->stats_base);
data += sizeof(struct port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
data += sizeof(struct queue_port_stats) / sizeof(u64);
- if (!is_t4(adapter->params.chip)) {
- t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
- val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
- val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
- *data = val1 - val2;
- data++;
- *data = val2;
- data++;
- } else {
- memset(data, 0, 2 * sizeof(u64));
- *data += 2;
- }
+ collect_adapter_stats(adapter, (struct adapter_stats *)data);
+ data += sizeof(struct adapter_stats) / sizeof(u64);
+
+ *data++ = (u64)pi->port_id;
+ collect_channel_stats(adapter, (struct channel_stats *)data,
+ pi->port_id);
+ data += sizeof(struct channel_stats) / sizeof(u64);
+
+ *data++ = (u64)pi->port_id;
+ memset(&s, 0, sizeof(s));
+ t4_get_lb_stats(adapter, pi->port_id, &s);
+
+ p0 = &s.octets;
+ for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
+ *data++ = (unsigned long long)*p0++;
}
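ethtool pairs these values positionally with the strings returned by get_strings(), so the flattened layout has to match the four string arrays exactly; summarizing the order produced above:

/* u64 slots, in order:
 *   struct port_stats       - offset-corrected hardware port counters
 *   struct queue_port_stats - per-queue SGE counters
 *   struct adapter_stats    - adapter-wide TP/SGE counters
 *   port_id                 - fills the "--------Channel---------" slot
 *   struct channel_stats    - per-channel TP/FCoE counters
 *   port_id                 - fills the "-------Loopback-----------" slot
 *   lb_port_stats fields    - copied from s.octets onward, one per
 *                             remaining loopback string
 */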
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -250,7 +457,7 @@ static int restart_autoneg(struct net_device *dev)
return -EAGAIN;
if (p->link_cfg.autoneg != AUTONEG_ENABLE)
return -EINVAL;
- t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
+ t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
return 0;
}
@@ -267,7 +474,7 @@ static int identify_port(struct net_device *dev,
else
return -EINVAL;
- return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
+ return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
@@ -439,7 +646,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
lc->autoneg = cmd->autoneg;
if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
lc);
return 0;
}
@@ -472,7 +679,7 @@ static int set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
lc->requested_fc |= PAUSE_TX;
if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
lc);
return 0;
}
@@ -578,7 +785,7 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
c->rx_coalesce_usecs = qtimer_val(adap, rq);
- c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+ c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
adap->sge.counter_val[rq->pktcnt_idx] : 0;
c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
return 0;
@@ -617,7 +824,7 @@ static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
*/
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
- int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -626,7 +833,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
- int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -669,8 +876,8 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
aligned_offset = eeprom->offset & ~3;
aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
- if (adapter->fn > 0) {
- u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+ if (adapter->pf > 0) {
+ u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
if (aligned_offset < start ||
aligned_offset + aligned_len > start + EEPROMPFSIZE)
@@ -740,37 +947,6 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
return ret;
}
-#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
-#define BCAST_CRC 0xa0ccc1a6
-
-static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- wol->supported = WAKE_BCAST | WAKE_MAGIC;
- wol->wolopts = netdev2adap(dev)->wol;
- memset(&wol->sopass, 0, sizeof(wol->sopass));
-}
-
-static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- int err = 0;
- struct port_info *pi = netdev_priv(dev);
-
- if (wol->wolopts & ~WOL_SUPPORTED)
- return -EINVAL;
- t4_wol_magic_enable(pi->adapter, pi->tx_chan,
- (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
- if (wol->wolopts & WAKE_BCAST) {
- err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
- ~0ULL, 0, false);
- if (!err)
- err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
- ~6ULL, ~0ULL, BCAST_CRC, true);
- } else {
- t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
- }
- return err;
-}
-
static u32 get_rss_table_size(struct net_device *dev)
{
const struct port_info *pi = netdev_priv(dev);
@@ -900,8 +1076,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
- .get_wol = get_wol,
- .set_wol = set_wol,
.get_rxnfc = get_rxnfc,
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 803d91beec6f..0e27f2266e6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -135,8 +135,14 @@ struct filter_entry {
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
+#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
+#define FW6_CFNAME "cxgb4/t6-config.txt"
+#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
+#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
+#define PHY_AQ1202_DEVICEID 0x4409
+#define PHY_BCM84834_DEVICEID 0x4486
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
@@ -318,8 +324,9 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
* level") we need to issue the Set Parameters Commannd
* without sleeping (timeout < 0).
*/
- err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
- &name, &value);
+ err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+ &name, &value,
+ -FW_CMD_MAX_TIMEOUT);
if (err)
dev_err(adap->pdev_dev,
@@ -382,7 +389,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
int uc_cnt = netdev_uc_count(dev);
int mc_cnt = netdev_mc_count(dev);
const struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->fn;
+ unsigned int mb = pi->adapter->pf;
/* first do the secondary unicast addresses */
netdev_for_each_uc_addr(ha, dev) {
@@ -439,7 +446,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
ret = set_addr_filters(dev, sleep_ok);
if (ret == 0)
- ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
(dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
sleep_ok);
@@ -456,7 +463,7 @@ static int link_start(struct net_device *dev)
{
int ret;
struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->fn;
+ unsigned int mb = pi->adapter->pf;
/*
* We do not set address filters and promiscuity here, the stack does
@@ -474,7 +481,7 @@ static int link_start(struct net_device *dev)
}
}
if (ret == 0)
- ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
+ ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
&pi->link_cfg);
if (ret == 0) {
local_bh_disable();
@@ -856,23 +863,39 @@ static void free_msix_queue_irqs(struct adapter *adap)
*
* Sets up the portion of the HW RSS table for the port's VI to distribute
* packets to the Rx queues in @queues.
+ * Should never be called before the SGE Ethernet Rx queues are set up.
*/
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
u16 *rss;
int i, err;
- const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
+ struct adapter *adapter = pi->adapter;
+ const struct sge_eth_rxq *rxq;
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
if (!rss)
return -ENOMEM;
/* map the queue indices to queue ids */
for (i = 0; i < pi->rss_size; i++, queues++)
- rss[i] = q[*queues].rspq.abs_id;
+ rss[i] = rxq[*queues].rspq.abs_id;
- err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
+ err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
pi->rss_size, rss, pi->rss_size);
+ /* If Tunnel All Lookup isn't specified in the global RSS
+ * Configuration, then we need to specify a default Ingress
+ * Queue for any ingress packets which aren't hashed. We'll
+ * use our first ingress queue ...
+ */
+ if (!err)
+ err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
+ FW_RSS_VI_CONFIG_CMD_UDPEN_F,
+ rss[0]);
kfree(rss);
return err;
}
@@ -885,11 +908,15 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
*/
static int setup_rss(struct adapter *adap)
{
- int i, err;
+ int i, j, err;
for_each_port(adap, i) {
const struct port_info *pi = adap2pinfo(adap, i);
+ /* Fill default values with equal distribution */
+ for (j = 0; j < pi->rss_size; j++)
+ pi->rss[j] = j % pi->nqsets;
+
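/* For concreteness: a hypothetical port with pi->nqsets = 4 and
 * pi->rss_size = 8 gets the default table {0, 1, 2, 3, 0, 1, 2, 3},
 * spreading hash buckets round-robin across the port's Rx queues.
 */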
err = cxgb4_write_rss(pi, pi->rss);
if (err)
return err;
@@ -977,7 +1004,7 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[i / per_chan],
msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler);
+ uldrx_handler, 0);
if (err)
return err;
memset(&q->stats, 0, sizeof(q->stats));
@@ -1007,7 +1034,7 @@ static int setup_sge_queues(struct adapter *adap)
msi_idx = 1; /* vector 0 is for non-queue interrupts */
else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
- NULL, NULL);
+ NULL, NULL, -1);
if (err)
return err;
msi_idx = -((int)s->intrq.abs_id + 1);
@@ -1027,7 +1054,7 @@ static int setup_sge_queues(struct adapter *adap)
* new/deleted queues.
*/
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
- msi_idx, NULL, fwevtq_handler);
+ msi_idx, NULL, fwevtq_handler, -1);
if (err) {
freeout: t4_free_sge_resources(adap);
return err;
@@ -1044,7 +1071,9 @@ freeout: t4_free_sge_resources(adap);
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
msi_idx, &q->fl,
- t4_ethrx_handler);
+ t4_ethrx_handler,
+ t4_get_mps_bg_map(adap,
+ pi->tx_chan));
if (err)
goto freeout;
q->rspq.idx = j;
@@ -1324,11 +1353,6 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return fallback(dev, skb) % dev->real_num_tx_queues;
}
-static inline int is_offload(const struct adapter *adap)
-{
- return adap->params.offload;
-}
-
static int closest_timer(const struct sge *s, int time)
{
int i, delta, match = 0, min_delta = INT_MAX;
@@ -1389,8 +1413,8 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
FW_PARAMS_PARAM_X_V(
FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
- err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
- &new_idx);
+ err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &v, &new_idx);
if (err)
return err;
}
@@ -1398,7 +1422,7 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
}
us = us == 0 ? 6 : closest_timer(&adap->sge, us);
- q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
+ q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
return 0;
}
@@ -1411,7 +1435,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
return 0;
- err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
-1, -1, -1,
!!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (unlikely(err))
@@ -1694,7 +1718,7 @@ static int tid_init(struct tid_info *t)
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
/* Reserve stid 0 for T4/T5 adapters */
if (!t->stid_base &&
- (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+ (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
__set_bit(0, t->stid_bmap);
return 0;
@@ -1983,11 +2007,8 @@ EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
- int ret;
- ret = t4_fwaddrspace_write(adap, adap->mbox,
- 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
- return ret;
+ return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
@@ -2042,25 +2063,6 @@ out:
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
-void cxgb4_disable_db_coalescing(struct net_device *dev)
-{
- struct adapter *adap;
-
- adap = netdev2adap(dev);
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
- NOCOALESCE_F);
-}
-EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
-
-void cxgb4_enable_db_coalescing(struct net_device *dev)
-{
- struct adapter *adap;
-
- adap = netdev2adap(dev);
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
-}
-EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
-
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
struct adapter *adap;
@@ -2100,10 +2102,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
if (offset < mc0_end) {
memtype = MEM_MC0;
memaddr = offset - edc1_end;
- } else if (is_t4(adap->params.chip)) {
- /* T4 only has a single memory channel */
- goto err;
- } else {
+ } else if (is_t5(adap->params.chip)) {
size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
mc1_size = EXT_MEM1_SIZE_G(size) << 20;
mc1_end = mc0_end + mc1_size;
@@ -2114,6 +2113,9 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
/* offset beyond the end of any memory */
goto err;
}
+ } else {
+ /* T4/T6 only has a single memory channel */
+ goto err;
}
}
@@ -2148,7 +2150,7 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid)
{
- return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
+ return t4_bar2_sge_qregs(netdev2adap(dev),
qid,
(qtype == CXGB4_BAR2_QTYPE_EGRESS
? T4_BAR2_QTYPE_EGRESS
@@ -2278,9 +2280,13 @@ static void process_db_full(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+ else
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -2342,7 +2348,7 @@ static void process_db_drop(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- } else {
+ } else if (is_t5(adap->params.chip)) {
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u16 qid = (dropped_db >> 15) & 0x1ffff;
u16 pidx_inc = dropped_db & 0x1fff;
@@ -2350,7 +2356,7 @@ static void process_db_drop(struct work_struct *work)
unsigned int bar2_qid;
int ret;
- ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
+ ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
&bar2_qoffset, &bar2_qid);
if (ret)
dev_err(adap->pdev_dev, "doorbell drop recovery: "
@@ -2363,7 +2369,8 @@ static void process_db_drop(struct work_struct *work)
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
}
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
@@ -2393,7 +2400,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
unsigned short i;
lli.pdev = adap->pdev;
- lli.pf = adap->fn;
+ lli.pf = adap->pf;
lli.l2t = adap->l2t;
lli.tids = &adap->tids;
lli.ports = adap->port;
@@ -2432,6 +2439,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.max_ordird_qp = adap->params.max_ordird_qp;
lli.max_ird_adapter = adap->params.max_ird_adapter;
lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
+ lli.nodeid = dev_to_node(adap->pdev_dev);
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -2729,7 +2737,7 @@ static int cxgb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
+ return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
/* Return an error number if the indicated filter isn't writable ...
@@ -2873,7 +2881,8 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
spin_unlock(&adapter->stats_lock);
return ns;
}
- t4_get_port_stats(adapter, p->tx_chan, &stats);
+ t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
+ &p->stats_base);
spin_unlock(&adapter->stats_lock);
ns->tx_bytes = stats.tx_octets;
@@ -2932,7 +2941,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
} else
return -EINVAL;
- mbox = pi->adapter->fn;
+ mbox = pi->adapter->pf;
if (cmd == SIOCGMIIREG)
ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
data->reg_num, &data->val_out);
@@ -2959,7 +2968,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
return -EINVAL;
- ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
-1, -1, -1, true);
if (!ret)
dev->mtu = new_mtu;
@@ -2975,7 +2984,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
+ ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
pi->xact_addr_filt, addr->sa_data, true, true);
if (ret < 0)
return ret;
@@ -3034,86 +3043,11 @@ void t4_fatal_err(struct adapter *adap)
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
-/* Return the specified PCI-E Configuration Space register from our Physical
- * Function. We try first via a Firmware LDST Command since we prefer to let
- * the firmware own all of these registers, but if that fails we go for it
- * directly ourselves.
- */
-static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
-{
- struct fw_ldst_cmd ldst_cmd;
- u32 val;
- int ret;
-
- /* Construct and send the Firmware LDST Command to retrieve the
- * specified PCI-E Configuration Space register.
- */
- memset(&ldst_cmd, 0, sizeof(ldst_cmd));
- ldst_cmd.op_to_addrspace =
- htonl(FW_CMD_OP_V(FW_LDST_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_READ_F |
- FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
- ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
- ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
- ldst_cmd.u.pcie.ctrl_to_fn =
- (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
- ldst_cmd.u.pcie.r = reg;
- ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
- &ldst_cmd);
-
- /* If the LDST Command succeeded, extract the returned register
- * value. Otherwise read it directly ourselves.
- */
- if (ret == 0)
- val = ntohl(ldst_cmd.u.pcie.data[0]);
- else
- t4_hw_pci_read_cfg4(adap, reg, &val);
-
- return val;
-}
-
static void setup_memwin(struct adapter *adap)
{
- u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
+ u32 nic_win_base = t4_get_util_window(adap);
- if (is_t4(adap->params.chip)) {
- u32 bar0;
-
- /* Truncation intentional: we only read the bottom 32-bits of
- * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
- * mechanism to read BAR0 instead of using
- * pci_resource_start() because we could be operating from
- * within a Virtual Machine which is trapping our accesses to
- * our Configuration Space and we need to set up the PCI-E
- * Memory Window decoders with the actual addresses which will
- * be coming across the PCI-E link.
- */
- bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
- bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
- adap->t4_bar0 = bar0;
-
- mem_win0_base = bar0 + MEMWIN0_BASE;
- mem_win1_base = bar0 + MEMWIN1_BASE;
- mem_win2_base = bar0 + MEMWIN2_BASE;
- mem_win2_aperture = MEMWIN2_APERTURE;
- } else {
- /* For T5, only relative offset inside the PCIe BAR is passed */
- mem_win0_base = MEMWIN0_BASE;
- mem_win1_base = MEMWIN1_BASE;
- mem_win2_base = MEMWIN2_BASE_T5;
- mem_win2_aperture = MEMWIN2_APERTURE_T5;
- }
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
- mem_win0_base | BIR_V(0) |
- WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
- mem_win1_base | BIR_V(0) |
- WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
- mem_win2_base | BIR_V(0) |
- WINDOW_V(ilog2(mem_win2_aperture) - 10));
- t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
+ t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}
static void setup_memwin_rdma(struct adapter *adap)
@@ -3147,7 +3081,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F);
c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
- ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
+ ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
if (ret < 0)
return ret;
@@ -3163,18 +3097,18 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
}
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
- ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
+ ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
if (ret < 0)
return ret;
- ret = t4_config_glbl_rss(adap, adap->fn,
+ ret = t4_config_glbl_rss(adap, adap->pf,
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
if (ret < 0)
return ret;
- ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+ ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
FW_CMD_CAP_PF);
if (ret < 0)
@@ -3218,7 +3152,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
}
/* get basic stuff going */
- return t4_early_init(adap, adap->fn);
+ return t4_early_init(adap, adap->pf);
}
/*
@@ -3274,6 +3208,142 @@ static int adap_init0_tweaks(struct adapter *adapter)
return 0;
}
+/* 10Gb/s-BT PHY Support. Chip-external 10Gb/s-BT PHYs are complex chips
+ * unto themselves and they contain their own firmware to perform their
+ * tasks ...
+ */
+static int phy_aq1202_version(const u8 *phy_fw_data,
+ size_t phy_fw_size)
+{
+ int offset;
+
+ /* At offset 0x8 you're looking for the primary image's
+ * starting offset which is 3 Bytes wide
+ *
+ * At offset 0xa of the primary image, you look for the offset
+ * of the DRAM segment which is 3 Bytes wide.
+ *
+ * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
+ * wide
+ */
+ #define be16(__p) (((__p)[0] << 8) | (__p)[1])
+ #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
+ #define le24(__p) (le16(__p) | ((__p)[2] << 16))
+
+ offset = le24(phy_fw_data + 0x8) << 12;
+ offset = le24(phy_fw_data + offset + 0xa);
+ return be16(phy_fw_data + offset + 0x27e);
+
+ #undef be16
+ #undef le16
+ #undef le24
+}
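A worked pass through that offset chain, with made-up image bytes (purely illustrative):

/* le24 at 0x8 == 01 00 00          -> primary image at 0x1 << 12 = 0x1000
 * le24 at 0x1000 + 0xa == 00 20 00 -> DRAM segment at 0x002000
 * be16 at 0x2000 + 0x27e == 30 2e  -> firmware version 0x302e
 */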
+
+static struct info_10gbt_phy_fw {
+ unsigned int phy_fw_id; /* PCI Device ID */
+ char *phy_fw_file; /* /lib/firmware/ PHY Firmware file */
+ int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
+ int phy_flash; /* Has FLASH for PHY Firmware */
+} phy_info_array[] = {
+ {
+ PHY_AQ1202_DEVICEID,
+ PHY_AQ1202_FIRMWARE,
+ phy_aq1202_version,
+ 1,
+ },
+ {
+ PHY_BCM84834_DEVICEID,
+ PHY_BCM84834_FIRMWARE,
+ NULL,
+ 0,
+ },
+ { 0, NULL, NULL },
+};
+
+static struct info_10gbt_phy_fw *find_phy_info(int devid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
+ if (phy_info_array[i].phy_fw_id == devid)
+ return &phy_info_array[i];
+ }
+ return NULL;
+}
+
+/* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
+ * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error
+ * we return a negative error number. If we transfer new firmware we return 1
+ * (from t4_load_phy_fw()). If we don't do anything we return 0.
+ */
+static int adap_init0_phy(struct adapter *adap)
+{
+ const struct firmware *phyf;
+ int ret;
+ struct info_10gbt_phy_fw *phy_info;
+
+ /* Use the device ID to determine which PHY file to flash.
+ */
+ phy_info = find_phy_info(adap->pdev->device);
+ if (!phy_info) {
+ dev_warn(adap->pdev_dev,
+ "No PHY Firmware file found for this PHY\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
+ * use that. The adapter firmware provides us with a memory buffer
+ * where we can load a PHY firmware file from the host if we want to
+ * override the PHY firmware File in flash.
+ */
+ ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
+ adap->pdev_dev);
+ if (ret < 0) {
+ /* For adapters without FLASH attached to PHY for their
+ * firmware, it's obviously a fatal error if we can't get the
+ * firmware to the adapter. For adapters with PHY firmware
+ * FLASH storage, it's worth a warning if we can't find the
+ * PHY Firmware but we'll neuter the error ...
+ */
+ dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
+ "/lib/firmware/%s, error %d\n",
+ phy_info->phy_fw_file, -ret);
+ if (phy_info->phy_flash) {
+ int cur_phy_fw_ver = 0;
+
+ t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+ dev_warn(adap->pdev_dev, "continuing with, on-adapter "
+ "FLASH copy, version %#x\n", cur_phy_fw_ver);
+ ret = 0;
+ }
+
+ return ret;
+ }
+
+ /* Load PHY Firmware onto adapter.
+ */
+ ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
+ phy_info->phy_fw_version,
+ (u8 *)phyf->data, phyf->size);
+ if (ret < 0)
+ dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
+ -ret);
+ else if (ret > 0) {
+ int new_phy_fw_ver = 0;
+
+ if (phy_info->phy_fw_version)
+ new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
+ phyf->size);
+ dev_info(adap->pdev_dev, "Successfully transferred PHY "
+ "Firmware /lib/firmware/%s, version %#x\n",
+ phy_info->phy_fw_file, new_phy_fw_ver);
+ }
+
+ release_firmware(phyf);
+
+ return ret;
+}
+
/*
* Attempt to initialize the adapter via a Firmware Configuration File.
*/
@@ -3298,6 +3368,16 @@ static int adap_init0_config(struct adapter *adapter, int reset)
goto bye;
}
+ /* If this is a 10Gb/s-BT adapter make sure the chip-external
+ * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
+ * to be performed after any global adapter RESET above since some
+ * PHYs only have local RAM copies of the PHY firmware.
+ */
+ if (is_10gbt_device(adapter->pdev->device)) {
+ ret = adap_init0_phy(adapter);
+ if (ret < 0)
+ goto bye;
+ }
/*
* If we have a T4 configuration file under /lib/firmware/cxgb4/,
* then use that. Otherwise, use the configuration file stored
@@ -3310,6 +3390,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
case CHELSIO_T5:
fw_config_file = FW5_CFNAME;
break;
+ case CHELSIO_T6:
+ fw_config_file = FW6_CFNAME;
+ break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
adapter->pdev->device);
@@ -3335,7 +3418,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
ret = t4_query_params(adapter, adapter->mbox,
- adapter->fn, 0, 1, params, val);
+ adapter->pf, 0, 1, params, val);
if (ret == 0) {
/*
* For t4_memory_rw() below addresses and
@@ -3506,7 +3589,24 @@ static struct fw_info fw_info_array[] = {
.intfver_iscsi = FW_INTFVER(T5, ISCSI),
.intfver_fcoe = FW_INTFVER(T5, FCOE),
},
+ }, {
+ .chip = CHELSIO_T6,
+ .fs_name = FW6_CFNAME,
+ .fw_mod_name = FW6_FNAME,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T6,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
+ .intfver_nic = FW_INTFVER(T6, NIC),
+ .intfver_vnic = FW_INTFVER(T6, VNIC),
+ .intfver_ofld = FW_INTFVER(T6, OFLD),
+ .intfver_ri = FW_INTFVER(T6, RI),
+ .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
+ .intfver_iscsi = FW_INTFVER(T6, ISCSI),
+ .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
+ .intfver_fcoe = FW_INTFVER(T6, FCOE),
+ },
}
+
};
static struct fw_info *find_fw_info(int chip)
@@ -3612,7 +3712,7 @@ static int adap_init0(struct adapter *adap)
* the firmware. On the other hand, we need these fairly early on
* so we do this right after getting ahold of the firmware.
*/
- ret = get_vpd_params(adap, &adap->params.vpd);
+ ret = t4_get_vpd_params(adap, &adap->params.vpd);
if (ret < 0)
goto bye;
@@ -3624,7 +3724,7 @@ static int adap_init0(struct adapter *adap)
v =
FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
if (ret < 0)
goto bye;
@@ -3647,7 +3747,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
params, val);
/* If the firmware doesn't support Configuration Files,
@@ -3706,7 +3806,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(FILTER_START);
params[4] = FW_PARAM_PFVF(FILTER_END);
params[5] = FW_PARAM_PFVF(IQFLINT_START);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
if (ret < 0)
goto bye;
adap->sge.egr_start = val[0];
@@ -3724,7 +3824,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = FW_PARAM_PFVF(EQ_END);
params[1] = FW_PARAM_PFVF(IQFLINT_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
if (ret < 0)
goto bye;
adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
@@ -3745,7 +3845,7 @@ static int adap_init0(struct adapter *adap)
}
/* Allocate the memory for the various egress queue bitmaps
- * ie starving_fl and txq_maperr.
+ * ie starving_fl, txq_maperr and blocked_fl.
*/
adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
sizeof(long), GFP_KERNEL);
@@ -3761,9 +3861,18 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
+#ifdef CONFIG_DEBUG_FS
+ adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->sge.blocked_fl) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+#endif
+
params[0] = FW_PARAM_PFVF(CLIP_START);
params[1] = FW_PARAM_PFVF(CLIP_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
if (ret < 0)
goto bye;
adap->clipt_start = val[0];
@@ -3772,7 +3881,7 @@ static int adap_init0(struct adapter *adap)
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
/* If Active filter size is set we enable establishing
* offload connection through firmware work request
*/
@@ -3789,7 +3898,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
val[0] = 1;
- (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
+ (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
/*
* Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
@@ -3801,7 +3910,7 @@ static int adap_init0(struct adapter *adap)
adap->params.ulptx_memwrite_dsgl = false;
} else {
params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1, params, val);
adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
}
@@ -3827,7 +3936,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(TDDP_START);
params[4] = FW_PARAM_PFVF(TDDP_END);
params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
params, val);
if (ret < 0)
goto bye;
@@ -3865,7 +3974,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(RQ_END);
params[4] = FW_PARAM_PFVF(PBL_START);
params[5] = FW_PARAM_PFVF(PBL_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
params, val);
if (ret < 0)
goto bye;
@@ -3882,7 +3991,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(CQ_END);
params[4] = FW_PARAM_PFVF(OCQ_START);
params[5] = FW_PARAM_PFVF(OCQ_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
val);
if (ret < 0)
goto bye;
@@ -3895,7 +4004,7 @@ static int adap_init0(struct adapter *adap)
params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
val);
if (ret < 0) {
adap->params.max_ordird_qp = 8;
@@ -3913,7 +4022,7 @@ static int adap_init0(struct adapter *adap)
if (caps_cmd.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
params[1] = FW_PARAM_PFVF(ISCSI_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
params, val);
if (ret < 0)
goto bye;
@@ -3959,8 +4068,8 @@ static int adap_init0(struct adapter *adap)
adap->params.b_wnd);
}
t4_init_sge_params(adap);
- t4_init_tp_params(adap);
adap->flags |= FW_OK;
+ t4_init_tp_params(adap);
return 0;
/*
@@ -3973,6 +4082,9 @@ bye:
kfree(adap->sge.ingr_map);
kfree(adap->sge.starving_fl);
kfree(adap->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+ kfree(adap->sge.blocked_fl);
+#endif
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
return ret;
@@ -4040,7 +4152,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
- if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
+ if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
return PCI_ERS_RESULT_DISCONNECT;
adap->flags |= FW_OK;
if (adap_init1(adap, &c))
@@ -4049,7 +4161,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
for_each_port(adap, i) {
struct port_info *p = adap2pinfo(adap, i);
- ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
+ ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
NULL, NULL);
if (ret < 0)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4340,7 +4452,12 @@ static int enable_msix(struct adapter *adap)
static int init_rss(struct adapter *adap)
{
- unsigned int i, j;
+ unsigned int i;
+ int err;
+
+ err = t4_init_rss_mode(adap, adap->mbox);
+ if (err)
+ return err;
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
@@ -4348,8 +4465,6 @@ static int init_rss(struct adapter *adap)
pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
if (!pi->rss)
return -ENOMEM;
- for (j = 0; j < pi->rss_size; j++)
- pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
}
return 0;
}
@@ -4413,15 +4528,23 @@ static void free_some_resources(struct adapter *adapter)
kfree(adapter->sge.ingr_map);
kfree(adapter->sge.starving_fl);
kfree(adapter->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+ kfree(adapter->sge.blocked_fl);
+#endif
disable_msi(adapter);
for_each_port(adapter, i)
if (adapter->port[i]) {
+ struct port_info *pi = adap2pinfo(adapter, i);
+
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
kfree(adap2pinfo(adapter, i)->rss);
free_netdev(adapter->port[i]);
}
if (adapter->flags & FW_OK)
- t4_fw_bye(adapter, adapter->fn);
+ t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
@@ -4512,7 +4635,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
adapter->mbox = func;
- adapter->fn = func;
+ adapter->pf = func;
adapter->msg_enable = dflt_msg_enable;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -4532,7 +4655,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!is_t4(adapter->params.chip)) {
s_qpp = (QUEUESPERPAGEPF0_S +
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
- adapter->fn);
+ adapter->pf);
qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
num_seg = PAGE_SIZE / SEGMENT_SIZE;
@@ -4555,10 +4678,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto out_free_adapter;
}
+ t4_write_reg(adapter, SGE_STAT_CFG_A,
+ STATSOURCE_T5_V(7) | STATMODE_V(0));
}
setup_memwin(adapter);
err = adap_init0(adapter);
+#ifdef CONFIG_DEBUG_FS
+ bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
+#endif
setup_memwin_rdma(adapter);
if (err)
goto out_unmap_bar;
@@ -4607,10 +4735,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = t4_port_init(adapter, func, func, 0);
if (err)
goto out_free_dev;
+ } else if (adapter->params.nports == 1) {
+ /* If we don't have a connection to the firmware -- possibly
+ * because of an error -- grab the raw VPD parameters so we
+ * can set the proper MAC Address on the debug network
+ * interface that we've created.
+ */
+ u8 hw_addr[ETH_ALEN];
+ u8 *na = adapter->params.vpd.na;
+
+ err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
+ if (!err) {
+ for (i = 0; i < ETH_ALEN; i++)
+ hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+ hex2val(na[2 * i + 1]));
+ t4_set_hw_addr(adapter, 0, hw_addr);
+ }
}
- /*
- * Configure queues and allocate tables now, they can be needed as
+ /* Configure queues and allocate tables now, they can be needed as
* soon as the first register_netdev completes.
*/
cfg_queues(adapter);
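
The MAC-address recovery in the hunk above converts the ASCII hex stored in the VPD "NA" keyword one nibble pair at a time. A minimal standalone sketch of that conversion; hex2val() is re-created here for illustration and may differ from the driver's own helper:

#include <stdio.h>

#define ETH_ALEN 6

/* Illustrative stand-in for the driver's hex2val(): one hex digit -> 0..15 */
static int hex2val(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

int main(void)
{
	const char *na = "000743123456";	/* 12 hex chars from the VPD "NA" keyword */
	unsigned char hw_addr[ETH_ALEN];
	int i;

	/* Two hex digits per MAC byte, exactly as in init_one() above */
	for (i = 0; i < ETH_ALEN; i++)
		hw_addr[i] = hex2val(na[2 * i + 0]) * 16 + hex2val(na[2 * i + 1]);

	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%c", hw_addr[i], i == ETH_ALEN - 1 ? '\n' : ':');
	return 0;
}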
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 78ab4d406ce2..14e8110b5dbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -264,6 +264,7 @@ struct cxgb4_lld_info {
unsigned int max_ordird_qp; /* Max ORD/IRD depth per RDMA QP */
unsigned int max_ird_adapter; /* Max IRD memory per adapter */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ int nodeid; /* device numa node id */
};
struct cxgb4_uld_info {
@@ -297,8 +298,6 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
int cxgb4_flush_eq_cache(struct net_device *dev);
-void cxgb4_disable_db_coalescing(struct net_device *dev);
-void cxgb4_enable_db_coalescing(struct net_device *dev);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
u64 cxgb4_read_sge_timestamp(struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 0d2eddab04ef..6b7c37fd0252 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -100,16 +100,6 @@
*/
#define TX_QCHECK_PERIOD (HZ / 2)
-/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
- * (in RX_QCHECK_PERIOD multiples). If we find one of the SGE Ingress DMA
- * State Machines in the same state for this amount of time (in HZ) then we'll
- * issue a warning about a potential hang. We'll repeat the warning as the
- * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
- * the situation clears. If the situation clears, we'll note that as well.
- */
-#define SGE_IDMA_WARN_THRESH (1 * HZ)
-#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
-
/*
* Max number of Tx descriptors to be reclaimed by the Tx timer.
*/
@@ -532,14 +522,17 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
- u32 val;
if (q->pend_cred >= 8) {
+ u32 val = adap->params.arch.sge_fl_db;
+
if (is_t4(adap->params.chip))
- val = PIDX_V(q->pend_cred / 8);
+ val |= PIDX_V(q->pend_cred / 8);
else
- val = PIDX_T5_V(q->pend_cred / 8) |
- DBTYPE_F;
- val |= DBPRIO_F;
+ val |= PIDX_T5_V(q->pend_cred / 8);
+
+ /* Make sure all memory writes to the Free List queue are
+ * committed before we tell the hardware about them.
+ */
wmb();
/* If we don't have access to the new User Doorbell (T5+), use
@@ -594,6 +587,11 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
int node;
+#ifdef CONFIG_DEBUG_FS
+ if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
+ goto out;
+#endif
+
gfp |= __GFP_NOWARN;
node = dev_to_node(adap->pdev_dev);
@@ -930,7 +928,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
*/
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
- wmb(); /* write descriptors before telling HW */
+ /* Make sure that all writes to the TX Descriptors are committed
+ * before we tell the hardware about them.
+ */
+ wmb();
/* If we don't have access to the new User Doorbell (T5+), use the old
* doorbell mechanism; otherwise use the new BAR2 mechanism.
@@ -1032,7 +1033,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
*/
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
const struct iphdr *iph = ip_hdr(skb);
@@ -1047,7 +1048,7 @@ nocsum: /*
* unknown protocol, disable HW csum
* and hope a bad packet is detected
*/
- return TXPKT_L4CSUM_DIS;
+ return TXPKT_L4CSUM_DIS_F;
}
} else {
/*
@@ -1063,15 +1064,21 @@ nocsum: /*
goto nocsum;
}
- if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
- else {
+ if (likely(csum_type >= TX_CSUM_TCPIP)) {
+ u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+ int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+ if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
+ hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ else
+ hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+ } else {
int start = skb_transport_offset(skb);
- return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
- TXPKT_CSUM_LOC(start + skb->csum_offset);
+ return TXPKT_CSUM_TYPE_V(csum_type) |
+ TXPKT_CSUM_START_V(start) |
+ TXPKT_CSUM_LOC_V(start + skb->csum_offset);
}
}
@@ -1112,11 +1119,11 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
return -ENOTSUPP;
/* FC CRC offload */
- *cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) |
- TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS |
- TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) |
- TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) |
- TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END);
+ *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
+ TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
+ TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
+ TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
+ TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
return 0;
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1130,7 +1137,6 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int len;
u32 wr_mid;
u64 cntrl, *end;
int qidx, credits;
@@ -1143,6 +1149,7 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
bool immediate = false;
+ int len, max_pkt_len;
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1156,13 +1163,20 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+ /* Discard the packet if the length is greater than mtu */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+ if (skb_vlan_tag_present(skb))
+ max_pkt_len += VLAN_HLEN;
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ goto out_free;
+
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb_get_queue_mapping(skb);
q = &adap->sge.ethtxq[qidx + pi->first_qset];
reclaim_completed_tx(adap, &q->q, true);
- cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
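
The new length guard in t4_eth_xmit() reduces to a small calculation. This standalone sketch, using the standard 14-byte Ethernet and 4-byte 802.1Q header sizes, shows which frames the hunk above would drop:

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN  14	/* Ethernet header */
#define VLAN_HLEN 4	/* 802.1Q tag */

/* Mirror of the drop test: non-GSO frames longer than MTU + headers are freed */
static bool would_drop(unsigned int skb_len, unsigned int mtu,
		       bool vlan_tag_present, unsigned int gso_size)
{
	unsigned int max_pkt_len = ETH_HLEN + mtu;

	if (vlan_tag_present)
		max_pkt_len += VLAN_HLEN;
	return gso_size == 0 && skb_len > max_pkt_len;
}

int main(void)
{
	printf("%d\n", would_drop(1515, 1500, false, 0));	/* 1: 1515 > 1514 */
	printf("%d\n", would_drop(1518, 1500, true, 0));	/* 0: VLAN allows 1518 */
	printf("%d\n", would_drop(64000, 1500, false, 1448));	/* 0: GSO is exempt */
	return 0;
}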
@@ -1213,23 +1227,29 @@ out_free: dev_kfree_skb_any(skb);
len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(len));
- lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE | LSO_LAST_SLICE |
- LSO_IPV6(v6) |
- LSO_ETHHDR_LEN(eth_xtra_len / 4) |
- LSO_IPHDR_LEN(l3hdr_len / 4) |
- LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
lso->c.ipid_ofst = htons(0);
lso->c.mss = htons(ssi->gso_size);
lso->c.seqno_offset = htonl(0);
if (is_t4(adap->params.chip))
lso->c.len = htonl(skb->len);
else
- lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
+ lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
cpl = (void *)(lso + 1);
- cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN(l3hdr_len) |
- TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
@@ -1238,23 +1258,25 @@ out_free: dev_kfree_skb_any(skb);
FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ cntrl = hwcsum(adap->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
q->tx_cso++;
}
}
if (skb_vlan_tag_present(skb)) {
q->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
if (skb->protocol == htons(ETH_P_FCOE))
- cntrl |= TXPKT_VLAN(
+ cntrl |= TXPKT_VLAN_V(
((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
}
- cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
+ cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf));
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1964,7 +1986,7 @@ static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
static inline bool is_new_response(const struct rsp_ctrl *r,
const struct sge_rspq *q)
{
- return RSPD_GEN(r->type_gen) == q->gen;
+ return (r->type_gen >> RSPD_GEN_S) == q->gen;
}
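
The rewritten is_new_response() reads the ring's generation bit directly. The ownership protocol is easy to model in isolation; this sketch assumes the generation bit sits at bit 7 of type_gen (mirroring RSPD_GEN_S) and is illustrative, not driver code:

#include <stdio.h>

#define RSPD_GEN_S 7	/* assumed bit position, mirroring RSPD_GEN_S above */

/* A response descriptor is "new" when the generation bit in type_gen matches
 * the generation the queue currently expects; the expected bit flips every
 * time the consumer wraps the ring.
 */
static int is_new_response(unsigned char type_gen, unsigned int expect_gen)
{
	return ((type_gen >> RSPD_GEN_S) & 1) == expect_gen;
}

int main(void)
{
	unsigned int gen = 1;	/* queues start out expecting gen 1 */

	printf("%d\n", is_new_response(1 << RSPD_GEN_S, gen));	/* 1: ours */
	printf("%d\n", is_new_response(0, gen));		/* 0: stale */
	gen ^= 1;	/* after a ring wrap the sense inverts */
	printf("%d\n", is_new_response(0, gen));		/* 1: ours now */
	return 0;
}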
/**
@@ -2011,19 +2033,19 @@ static int process_responses(struct sge_rspq *q, int budget)
break;
dma_rmb();
- rsp_type = RSPD_TYPE(rc->type_gen);
- if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ rsp_type = RSPD_TYPE_G(rc->type_gen);
+ if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
struct page_frag *fp;
struct pkt_gl si;
const struct rx_sw_desc *rsd;
u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
- if (len & RSPD_NEWBUF) {
+ if (len & RSPD_NEWBUF_F) {
if (likely(q->offset > 0)) {
free_rx_bufs(q->adap, &rxq->fl, 1);
q->offset = 0;
}
- len = RSPD_LEN(len);
+ len = RSPD_LEN_G(len);
}
si.tot_len = len;
@@ -2058,7 +2080,7 @@ static int process_responses(struct sge_rspq *q, int budget)
q->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&si, &rxq->fl, frags);
- } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
ret = q->handler(q, q->cur_desc, NULL);
} else {
ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
@@ -2066,7 +2088,7 @@ static int process_responses(struct sge_rspq *q, int budget)
if (unlikely(ret)) {
/* couldn't process descriptor, back off for recovery */
- q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+ q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
break;
}
@@ -2090,7 +2112,7 @@ int cxgb_busy_poll(struct napi_struct *napi)
return LL_FLUSH_BUSY;
work_done = process_responses(q, 4);
- params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
+ params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
q->next_intr_params = params;
val = CIDXINC_V(work_done) | SEINTARM_V(params);
@@ -2137,7 +2159,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
int timer_index;
napi_complete(napi);
- timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
+ timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
if (q->adaptive_rx) {
if (work_done > max(timer_pkt_quota[timer_index],
@@ -2147,15 +2169,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
timer_index = timer_index - 1;
timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
- q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
- V_QINTR_CNT_EN;
+ q->next_intr_params =
+ QINTR_TIMER_IDX_V(timer_index) |
+ QINTR_CNT_EN_V(0);
params = q->next_intr_params;
} else {
params = q->next_intr_params;
q->next_intr_params = q->intr_params;
}
} else
- params = QINTR_TIMER_IDX(7);
+ params = QINTR_TIMER_IDX_V(7);
val = CIDXINC_V(work_done) | SEINTARM_V(params);
@@ -2203,7 +2226,7 @@ static unsigned int process_intrq(struct adapter *adap)
break;
dma_rmb();
- if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+ if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
unsigned int qid = ntohl(rc->pldbuflen_qid);
qid -= adap->sge.ingr_start;
@@ -2279,7 +2302,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
static void sge_rx_timer_cb(unsigned long data)
{
unsigned long m;
- unsigned int i, idma_same_state_cnt[2];
+ unsigned int i;
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
@@ -2300,67 +2323,16 @@ static void sge_rx_timer_cb(unsigned long data)
set_bit(id, s->starving_fl);
}
}
+ /* The remainder of the SGE RX Timer Callback routine is dedicated to
+ * global Master PF activities like checking for chip ingress stalls,
+ * etc.
+ */
+ if (!(adap->flags & MASTER_PF))
+ goto done;
- t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
- idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
- idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
-
- for (i = 0; i < 2; i++) {
- u32 debug0, debug11;
-
- /* If the Ingress DMA Same State Counter ("timer") is less
- * than 1s, then we can reset our synthesized Stall Timer and
- * continue. If we have previously emitted warnings about a
- * potential stalled Ingress Queue, issue a note indicating
- * that the Ingress Queue has resumed forward progress.
- */
- if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
- if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
- CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n",
- i, s->idma_qid[i],
- s->idma_stalled[i]/HZ);
- s->idma_stalled[i] = 0;
- continue;
- }
-
- /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
- * domain. The first time we get here it'll be because we
- * passed the 1s Threshold; each additional time it'll be
- * because the RX Timer Callback is being fired on its regular
- * schedule.
- *
- * If the stall is below our Potential Hung Ingress Queue
- * Warning Threshold, continue.
- */
- if (s->idma_stalled[i] == 0)
- s->idma_stalled[i] = HZ;
- else
- s->idma_stalled[i] += RX_QCHECK_PERIOD;
-
- if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
- continue;
-
- /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
- if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
- continue;
-
- /* Read and save the SGE IDMA State and Queue ID information.
- * We do this every time in case it changes across time ...
- */
- t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
- debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
- s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
-
- t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
- debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
- s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
-
- CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
- i, s->idma_qid[i], s->idma_state[i],
- s->idma_stalled[i]/HZ, debug0, debug11);
- t4_sge_decode_idma_state(adap, s->idma_state[i]);
- }
+ t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
+done:
mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
@@ -2429,7 +2401,7 @@ static void __iomem *bar2_address(struct adapter *adapter,
u64 bar2_qoffset;
int ret;
- ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype,
+ ret = t4_bar2_sge_qregs(adapter, qid, qtype,
&bar2_qoffset, pbar2_qid);
if (ret)
return NULL;
@@ -2437,9 +2409,12 @@ static void __iomem *bar2_address(struct adapter *adapter,
return adapter->bar2 + bar2_qoffset;
}
+/* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
+ * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
+ */
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
- struct sge_fl *fl, rspq_handler_t hnd)
+ struct sge_fl *fl, rspq_handler_t hnd, int cong)
{
int ret, flsz = 0;
struct fw_iq_cmd c;
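
A quick model of the @intr_idx convention documented in the new comment above: a non-negative value is an MSI/MSI-X vector, a negative value carries an absolute queue id as -(qid + 1). The encode/decode helpers are illustrative names, not driver API:

#include <assert.h>

/* Encode an absolute ingress queue id into the negative intr_idx space */
static int encode_abs_qid(int qid)
{
	return -(qid + 1);
}

/* Recover the absolute qid, matching FW_IQ_CMD_IQANDSTINDEX_V(-intr_idx - 1) */
static int decode_abs_qid(int intr_idx)
{
	return -intr_idx - 1;
}

int main(void)
{
	int qid;

	/* qid 0 encodes to -1, so the sign alone distinguishes the two cases */
	for (qid = 0; qid < 1024; qid++)
		assert(decode_abs_qid(encode_abs_qid(qid)) == qid);
	return 0;
}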
@@ -2457,12 +2432,13 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
+ FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
FW_LEN16(c));
c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
- FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
+ FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
+ FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
@@ -2471,8 +2447,21 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
+ if (cong >= 0)
+ c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
if (fl) {
+ enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ /* Allocate the ring for the hardware free list (with space
+ * for its status page) along with the associated software
+ * descriptor ring. The free list size needs to be a multiple
+ * of the Egress Queue Unit and at least 2 Egress Units larger
+ * than the SGE's Egress Congestion Threshold
+ * (fl_starve_thres - 1).
+ */
+ if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
+ fl->size = s->fl_starve_thres - 1 + 2 * 8;
fl->size = roundup(fl->size, 8);
fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
sizeof(struct rx_sw_desc), &fl->addr,
@@ -2481,17 +2470,25 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
goto fl_nomem;
flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
- c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
- FW_IQ_CMD_FL0FETCHRO_F |
- FW_IQ_CMD_FL0DATARO_F |
- FW_IQ_CMD_FL0PADEN_F);
- c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
- FW_IQ_CMD_FL0FBMAX_V(3));
+ c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
+ FW_IQ_CMD_FL0FETCHRO_F |
+ FW_IQ_CMD_FL0DATARO_F |
+ FW_IQ_CMD_FL0PADEN_F);
+ if (cong >= 0)
+ c.iqns_to_fl0congen |=
+ htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
+ FW_IQ_CMD_FL0CONGCIF_F |
+ FW_IQ_CMD_FL0CONGEN_F);
+ c.fl0dcaen_to_fl0cidxfthresh =
+ htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+ FETCHBURSTMAX_512B_X :
+ FETCHBURSTMAX_256B_X));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret)
goto err;
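
The free-list sizing rule in the hunk above is a clamp followed by a round-up. A worked standalone sketch of the same arithmetic; ROUNDUP is a local stand-in for the kernel's roundup():

#include <stdio.h>

/* Round x up to the next multiple of y */
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

static unsigned int clamp_fl_size(unsigned int fl_size,
				  unsigned int fl_starve_thres)
{
	/* At least two Egress Queue Units (2 * 8 descriptors) beyond the
	 * congestion threshold, as the comment above requires.
	 */
	unsigned int min_size = fl_starve_thres - 1 + 2 * 8;

	if (fl_size < min_size)
		fl_size = min_size;
	return ROUNDUP(fl_size, 8);	/* whole Egress Queue Units only */
}

int main(void)
{
	printf("%u\n", clamp_fl_size(16, 41));	/* threshold 41 -> minimum 56 */
	printf("%u\n", clamp_fl_size(512, 41));	/* already large enough: 512 */
	return 0;
}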
@@ -2532,6 +2529,41 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
&fl->bar2_qid);
refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
}
+
+ /* For T5 and later we attempt to set up the Congestion Manager values
+ * of the new RX Ethernet Queue. This should really be handled by
+ * firmware because it's more complex than any host driver wants to
+ * get involved with and it's different per chip and this is almost
+ * certainly wrong. Firmware would be wrong as well, but it would be
+ * a lot easier to fix in one place ... For now we do something very
+ * simple (and hopefully less wrong).
+ */
+ if (!is_t4(adap->params.chip) && cong >= 0) {
+ u32 param, val;
+ int i;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
+ FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
+ if (cong == 0) {
+ val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
+ } else {
+ val =
+ CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
+ for (i = 0; i < 4; i++) {
+ if (cong & (1 << i))
+ val |=
+ CONMCTXT_CNGCHMAP_V(1 << (i << 2));
+ }
+ }
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret)
+ dev_warn(adap->pdev_dev, "Failed to set Congestion"
+ " Manager Context for Ingress Queue %d: %d\n",
+ iq->cntxt_id, -ret);
+ }
+
return 0;
fl_nomem:
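
The T5+ Congestion Manager block above spreads the 4-bit @cong channel mask into a wider per-channel map, one nibble per channel (bit i of @cong sets bit i*4 of the map). This sketch models only that expansion, with the CONMCTXT_* field macros reduced to plain shifts for illustration:

#include <stdio.h>

/* Expand a 4-bit channel mask into the nibble-per-channel layout used above:
 * channel i occupies nibble i, and we set the low bit of that nibble.
 */
static unsigned int cong_to_chmap(unsigned int cong)
{
	unsigned int chmap = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (cong & (1 << i))
			chmap |= 1 << (i << 2);
	return chmap;
}

int main(void)
{
	printf("%#x\n", cong_to_chmap(0x1));	/* 0x1    : channel 0 */
	printf("%#x\n", cong_to_chmap(0x5));	/* 0x101  : channels 0 and 2 */
	printf("%#x\n", cong_to_chmap(0xf));	/* 0x1111 : all four channels */
	return 0;
}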
@@ -2589,23 +2621,24 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_ETH_CMD_PFN_V(adap->fn) |
+ FW_EQ_ETH_CMD_PFN_V(adap->pf) |
FW_EQ_ETH_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
FW_EQ_ETH_CMD_VIID_V(pi->viid));
- c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
- FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_ETH_CMD_FETCHRO_V(1) |
- FW_EQ_ETH_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
- FW_EQ_ETH_CMD_FBMAX_V(3) |
- FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
@@ -2637,29 +2670,30 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
sizeof(struct tx_desc), 0, &txq->q.phys_addr,
- NULL, 0, NUMA_NO_NODE);
+ NULL, 0, dev_to_node(adap->pdev_dev));
if (!txq->q.desc)
return -ENOMEM;
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
+ FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
FW_EQ_CTRL_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
c.physeqid_pkd = htonl(0);
- c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
- FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_CTRL_CMD_FETCHRO_F |
- FW_EQ_CTRL_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
- FW_EQ_CTRL_CMD_FBMAX_V(3) |
- FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
@@ -2697,21 +2731,22 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
+ FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
FW_EQ_OFLD_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
- c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
- FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
- FW_EQ_OFLD_CMD_FETCHRO_F |
- FW_EQ_OFLD_CMD_IQID_V(iqid));
- c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
- FW_EQ_OFLD_CMD_FBMAX_V(3) |
- FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
- FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
+ c.fetchszm_to_iqid =
+ htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
+ FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
+ c.dcaen_to_eqsize =
+ htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+ FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
@@ -2750,7 +2785,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
- t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
+ t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
rq->desc, rq->phys_addr);
@@ -2805,7 +2840,7 @@ void t4_free_sge_resources(struct adapter *adap)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
if (etq->q.desc) {
- t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
kfree(etq->q.sdesc);
@@ -2824,7 +2859,7 @@ void t4_free_sge_resources(struct adapter *adap)
if (q->q.desc) {
tasklet_kill(&q->qresume_tsk);
- t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
q->q.cntxt_id);
free_tx_desc(adap, &q->q, q->q.in_use, false);
kfree(q->q.sdesc);
@@ -2839,7 +2874,7 @@ void t4_free_sge_resources(struct adapter *adap)
if (cq->q.desc) {
tasklet_kill(&cq->qresume_tsk);
- t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
cq->q.cntxt_id);
__skb_queue_purge(&cq->sendq);
free_txq(adap, &cq->q);
@@ -3023,7 +3058,11 @@ int t4_sge_init(struct adapter *adap)
* Packing Boundary. T5 introduced the ability to specify these
* separately. The actual Ingress Packet Data alignment boundary
* within Packed Buffer Mode is the maximum of these two
- * specifications.
+ * specifications. (Note that it makes no real practical sense to
+ * have the Padding Boundary be larger than the Packing Boundary but you
+ * could set the chip up that way and, in fact, legacy T4 code would
+ * end up doing this because it would initialize the Padding Boundary and
+ * leave the Packing Boundary initialized to 0 (16 bytes).)
*/
ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
INGPADBOUNDARY_SHIFT_X);
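
The padding/packing discussion above boils down to: decode each boundary as a power of two and align to the larger of the two. A sketch with the bias treated as a placeholder (the driver takes the real value from INGPADBOUNDARY_SHIFT_X):

#include <stdio.h>

/* Illustrative bias; the real value comes from INGPADBOUNDARY_SHIFT_X */
#define INGPADBOUNDARY_SHIFT 5

static unsigned int data_align(unsigned int ingpad_field,
			       unsigned int ingpack_bytes)
{
	/* Field value n means a 2^(n + shift)-byte padding boundary */
	unsigned int ingpadboundary = 1u << (ingpad_field + INGPADBOUNDARY_SHIFT);

	/* Effective alignment in Packed Buffer Mode is the max of the two */
	return ingpadboundary > ingpack_bytes ? ingpadboundary : ingpack_bytes;
}

int main(void)
{
	printf("%u\n", data_align(0, 16));	/* pad 32B vs pack 16B -> 32 */
	printf("%u\n", data_align(0, 64));	/* pad 32B vs pack 64B -> 64 */
	return 0;
}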
@@ -3067,11 +3106,14 @@ int t4_sge_init(struct adapter *adap)
egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
s->fl_starve_thres = 2*egress_threshold + 1;
+ t4_idma_monitor_init(adap, &s->idma_monitor);
+
+ /* Set up timers used for recurring callbacks to process RX and TX
+ * administrative tasks.
+ */
setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
- s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */
- s->idma_stalled[0] = 0;
- s->idma_stalled[1] = 0;
+
spin_lock_init(&s->intrq_lock);
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e8578a742f2a..9d93f4c27300 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -150,7 +150,12 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
*/
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
- u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
+ u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ req |= ENABLE_F;
+ else
+ req |= T6_ENABLE_F;
if (is_t4(adap->params.chip))
req |= LOCALCFG_F;
@@ -214,8 +219,8 @@ static void fw_asrt(struct adapter *adap, u32 mbox_addr)
get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
dev_alert(adap->pdev_dev,
"FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
- asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
- ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+ asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
+ be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}
static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
@@ -233,13 +238,14 @@ static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
}
/**
- * t4_wr_mbox_meat - send a command to FW through the given mailbox
+ * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
* @adap: the adapter
* @mbox: index of the mailbox to use
* @cmd: the command to write
* @size: command length in bytes
* @rpl: where to optionally store the reply
* @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
*
* Sends the given command to FW through the selected mailbox and waits
* for the FW to execute the command. If @rpl is not %NULL it is used to
@@ -254,8 +260,8 @@ static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
* command or FW executes it but signals an error. In the latter case
* the return value is the error code indicated by FW (negated).
*/
-int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
- void *rpl, bool sleep_ok)
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout)
{
static const int delay[] = {
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
@@ -294,7 +300,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
delay_idx = 0;
ms = delay[0];
- for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+ for (i = 0; i < timeout; i += ms) {
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -332,114 +338,11 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
return -ETIMEDOUT;
}
-/**
- * t4_mc_read - read from MC through backdoor accesses
- * @adap: the adapter
- * @addr: address of first byte requested
- * @idx: which MC to access
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from MC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
-{
- int i;
- u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
- u32 mc_bist_status_rdata, mc_bist_data_pattern;
-
- if (is_t4(adap->params.chip)) {
- mc_bist_cmd = MC_BIST_CMD_A;
- mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
- mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
- mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
- mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
- } else {
- mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
- mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
- mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
- mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
- mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
- }
-
- if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
- return -EBUSY;
- t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
- t4_write_reg(adap, mc_bist_cmd_len, 64);
- t4_write_reg(adap, mc_bist_data_pattern, 0xc);
- t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
- BIST_CMD_GAP_V(1));
- i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
- if (i)
- return i;
-
-#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
- if (ecc)
- *ecc = t4_read_reg64(adap, MC_DATA(16));
-#undef MC_DATA
- return 0;
-}
-
-/**
- * t4_edc_read - read from EDC through backdoor accesses
- * @adap: the adapter
- * @idx: which EDC to access
- * @addr: address of first byte requested
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
{
- int i;
- u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
- u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
-
- if (is_t4(adap->params.chip)) {
- edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
- edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
- edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
- edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
- idx);
- edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
- idx);
- } else {
- edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
- edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
- edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
- edc_bist_cmd_data_pattern =
- EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
- edc_bist_status_rdata =
- EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
- }
-
- if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
- return -EBUSY;
- t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
- t4_write_reg(adap, edc_bist_cmd_len, 64);
- t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
- t4_write_reg(adap, edc_bist_cmd,
- BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
- i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
- if (i)
- return i;
-
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
- if (ecc)
- *ecc = t4_read_reg64(adap, EDC_DATA(16));
-#undef EDC_DATA
- return 0;
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
+ FW_CMD_MAX_TIMEOUT);
}
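
The new timeout parameter bounds the same polling loop as before; the delay[] table backs off from 1 ms to 200 ms and then repeats its last entry until the budget is spent. A host-side simulation of that schedule (no mailbox involved; the 500 ms budget is illustrative):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	/* Same back-off table as t4_wr_mbox_meat_timeout() */
	static const int delay[] = { 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 };
	int timeout = 500;		/* ms budget, illustrative */
	int delay_idx = 0, ms = delay[0];
	int i, polls = 0;

	for (i = 0; i < timeout; i += ms) {
		ms = delay[delay_idx];	/* last element may repeat */
		if (delay_idx < (int)ARRAY_SIZE(delay) - 1)
			delay_idx++;
		polls++;		/* one mailbox ownership check per step */
	}
	printf("%d polls within %d ms\n", polls, timeout);
	return 0;
}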
/**
@@ -483,9 +386,8 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
/* Offset into the region of memory which is being accessed
* MEM_EDC0 = 0
* MEM_EDC1 = 1
- * MEM_MC = 2 -- T4
- * MEM_MC0 = 2 -- For T5
- * MEM_MC1 = 3 -- For T5
+ * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
+ * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
*/
edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
if (mtype != MEM_MC1)
@@ -514,7 +416,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
if (is_t4(adap->params.chip))
mem_base -= adap->t4_bar0;
- win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
+ win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
/* Calculate our initial PCI-E Memory Window Position and Offset into
* that Window.
@@ -625,6 +527,102 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
return 0;
}
+/* Return the specified PCI-E Configuration Space register from our Physical
+ * Function. We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
+{
+ u32 val, ldst_addrspace;
+
+ /* Construct and send the Firmware LDST Command to retrieve the
+ * specified PCI-E Configuration Space register.
+ */
+ struct fw_ldst_cmd ldst_cmd;
+ int ret;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
+ ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ ldst_addrspace);
+ ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
+ ldst_cmd.u.pcie.ctrl_to_fn =
+ (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
+ ldst_cmd.u.pcie.r = reg;
+
+ /* If the LDST Command succeeds, return the result, otherwise
+ * fall through to reading it directly ourselves ...
+ */
+ ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
+ &ldst_cmd);
+ if (ret == 0)
+ val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
+ else
+ /* Read the desired Configuration Space register via the PCI-E
+ * Backdoor mechanism.
+ */
+ t4_hw_pci_read_cfg4(adap, reg, &val);
+ return val;
+}
+
+/* Get the window based on base passed to it.
+ * Window aperture is currently unhandled, but there is no use case for it
+ * right now
+ */
+static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
+ u32 memwin_base)
+{
+ u32 ret;
+
+ if (is_t4(adap->params.chip)) {
+ u32 bar0;
+
+ /* Truncation intentional: we only read the bottom 32-bits of
+ * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
+ * mechanism to read BAR0 instead of using
+ * pci_resource_start() because we could be operating from
+ * within a Virtual Machine which is trapping our accesses to
+ * our Configuration Space and we need to set up the PCI-E
+ * Memory Window decoders with the actual addresses which will
+ * be coming across the PCI-E link.
+ */
+ bar0 = t4_read_pcie_cfg4(adap, pci_base);
+ bar0 &= pci_mask;
+ adap->t4_bar0 = bar0;
+
+ ret = bar0 + memwin_base;
+ } else {
+ /* For T5, only relative offset inside the PCIe BAR is passed */
+ ret = memwin_base;
+ }
+ return ret;
+}
+
+/* Get the default utility window (win0) used by everyone */
+u32 t4_get_util_window(struct adapter *adap)
+{
+ return t4_get_window(adap, PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
+}
+
+/* Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
+{
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
+ memwin_base | BIR_V(0) |
+ WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
+}
+
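
For the window programming above, the WINDOW field encodes log2 of the aperture relative to the hardware's minimum window size. A sketch of that encoding; WINDOW_SHIFT here is an assumed placeholder for the real WINDOW_SHIFT_X constant from the hardware headers:

#include <stdio.h>

/* Assumed minimum window of 2^WINDOW_SHIFT bytes; the driver uses
 * WINDOW_SHIFT_X from t4_regs_values.h.
 */
#define WINDOW_SHIFT 10

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int aperture = 65536;	/* illustrative 64KB aperture */
	unsigned int field = ilog2_u(aperture) - WINDOW_SHIFT;

	printf("WINDOW field %u -> %u-byte window\n",
	       field, 1u << (field + WINDOW_SHIFT));
	return 0;
}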
/**
* t4_get_regs_len - return the size of the chips register set
* @adapter: the adapter
@@ -640,6 +638,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
return T4_REGMAP_SIZE;
case CHELSIO_T5:
+ case CHELSIO_T6:
return T5_REGMAP_SIZE;
}
@@ -666,7 +665,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x11fc, 0x123c,
0x1300, 0x173c,
0x1800, 0x18fc,
- 0x3000, 0x30d8,
+ 0x3000, 0x305c,
+ 0x3068, 0x30d8,
0x30e0, 0x5924,
0x5960, 0x59d4,
0x5a00, 0x5af8,
@@ -729,7 +729,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x19238, 0x1924c,
0x193f8, 0x19474,
0x19490, 0x194f8,
- 0x19800, 0x19f30,
+ 0x19800, 0x19f4c,
0x1a000, 0x1a06c,
0x1a0b0, 0x1a120,
0x1a128, 0x1a138,
@@ -878,7 +878,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x27780, 0x2778c,
0x27800, 0x27c38,
0x27c80, 0x27d7c,
- 0x27e00, 0x27e04
+ 0x27e00, 0x27e04,
};
static const unsigned int t5_reg_ranges[] = {
@@ -888,7 +888,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x1280, 0x173c,
0x1800, 0x18fc,
0x3000, 0x3028,
- 0x3060, 0x30d8,
+ 0x3068, 0x30d8,
0x30e0, 0x30fc,
0x3140, 0x357c,
0x35a8, 0x35cc,
@@ -900,7 +900,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x5940, 0x59dc,
0x59fc, 0x5a18,
0x5a60, 0x5a9c,
- 0x5b9c, 0x5bfc,
+ 0x5b94, 0x5bfc,
0x6000, 0x6040,
0x6058, 0x614c,
0x7700, 0x7798,
@@ -1014,27 +1014,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x30800, 0x30834,
0x308c0, 0x30908,
0x30910, 0x309ac,
- 0x30a00, 0x30a04,
- 0x30a0c, 0x30a2c,
+ 0x30a00, 0x30a2c,
0x30a44, 0x30a50,
0x30a74, 0x30c24,
+ 0x30d00, 0x30d00,
0x30d08, 0x30d14,
0x30d1c, 0x30d20,
0x30d3c, 0x30d50,
0x31200, 0x3120c,
0x31220, 0x31220,
0x31240, 0x31240,
- 0x31600, 0x31600,
- 0x31608, 0x3160c,
+ 0x31600, 0x3160c,
0x31a00, 0x31a1c,
- 0x31e04, 0x31e20,
+ 0x31e00, 0x31e20,
0x31e38, 0x31e3c,
0x31e80, 0x31e80,
0x31e88, 0x31ea8,
0x31eb0, 0x31eb4,
0x31ec8, 0x31ed4,
0x31fb8, 0x32004,
- 0x32208, 0x3223c,
+ 0x32200, 0x32200,
+ 0x32208, 0x32240,
+ 0x32248, 0x32280,
+ 0x32288, 0x322c0,
+ 0x322c8, 0x322fc,
0x32600, 0x32630,
0x32a00, 0x32abc,
0x32b00, 0x32b70,
@@ -1074,27 +1077,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x34800, 0x34834,
0x348c0, 0x34908,
0x34910, 0x349ac,
- 0x34a00, 0x34a04,
- 0x34a0c, 0x34a2c,
+ 0x34a00, 0x34a2c,
0x34a44, 0x34a50,
0x34a74, 0x34c24,
+ 0x34d00, 0x34d00,
0x34d08, 0x34d14,
0x34d1c, 0x34d20,
0x34d3c, 0x34d50,
0x35200, 0x3520c,
0x35220, 0x35220,
0x35240, 0x35240,
- 0x35600, 0x35600,
- 0x35608, 0x3560c,
+ 0x35600, 0x3560c,
0x35a00, 0x35a1c,
- 0x35e04, 0x35e20,
+ 0x35e00, 0x35e20,
0x35e38, 0x35e3c,
0x35e80, 0x35e80,
0x35e88, 0x35ea8,
0x35eb0, 0x35eb4,
0x35ec8, 0x35ed4,
0x35fb8, 0x36004,
- 0x36208, 0x3623c,
+ 0x36200, 0x36200,
+ 0x36208, 0x36240,
+ 0x36248, 0x36280,
+ 0x36288, 0x362c0,
+ 0x362c8, 0x362fc,
0x36600, 0x36630,
0x36a00, 0x36abc,
0x36b00, 0x36b70,
@@ -1134,27 +1140,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x38800, 0x38834,
0x388c0, 0x38908,
0x38910, 0x389ac,
- 0x38a00, 0x38a04,
- 0x38a0c, 0x38a2c,
+ 0x38a00, 0x38a2c,
0x38a44, 0x38a50,
0x38a74, 0x38c24,
+ 0x38d00, 0x38d00,
0x38d08, 0x38d14,
0x38d1c, 0x38d20,
0x38d3c, 0x38d50,
0x39200, 0x3920c,
0x39220, 0x39220,
0x39240, 0x39240,
- 0x39600, 0x39600,
- 0x39608, 0x3960c,
+ 0x39600, 0x3960c,
0x39a00, 0x39a1c,
- 0x39e04, 0x39e20,
+ 0x39e00, 0x39e20,
0x39e38, 0x39e3c,
0x39e80, 0x39e80,
0x39e88, 0x39ea8,
0x39eb0, 0x39eb4,
0x39ec8, 0x39ed4,
0x39fb8, 0x3a004,
- 0x3a208, 0x3a23c,
+ 0x3a200, 0x3a200,
+ 0x3a208, 0x3a240,
+ 0x3a248, 0x3a280,
+ 0x3a288, 0x3a2c0,
+ 0x3a2c8, 0x3a2fc,
0x3a600, 0x3a630,
0x3aa00, 0x3aabc,
0x3ab00, 0x3ab70,
@@ -1194,27 +1203,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3c800, 0x3c834,
0x3c8c0, 0x3c908,
0x3c910, 0x3c9ac,
- 0x3ca00, 0x3ca04,
- 0x3ca0c, 0x3ca2c,
+ 0x3ca00, 0x3ca2c,
0x3ca44, 0x3ca50,
0x3ca74, 0x3cc24,
+ 0x3cd00, 0x3cd00,
0x3cd08, 0x3cd14,
0x3cd1c, 0x3cd20,
0x3cd3c, 0x3cd50,
0x3d200, 0x3d20c,
0x3d220, 0x3d220,
0x3d240, 0x3d240,
- 0x3d600, 0x3d600,
- 0x3d608, 0x3d60c,
+ 0x3d600, 0x3d60c,
0x3da00, 0x3da1c,
- 0x3de04, 0x3de20,
+ 0x3de00, 0x3de20,
0x3de38, 0x3de3c,
0x3de80, 0x3de80,
0x3de88, 0x3dea8,
0x3deb0, 0x3deb4,
0x3dec8, 0x3ded4,
0x3dfb8, 0x3e004,
- 0x3e208, 0x3e23c,
+ 0x3e200, 0x3e200,
+ 0x3e208, 0x3e240,
+ 0x3e248, 0x3e280,
+ 0x3e288, 0x3e2c0,
+ 0x3e2c8, 0x3e2fc,
0x3e600, 0x3e630,
0x3ea00, 0x3eabc,
0x3eb00, 0x3eb70,
@@ -1247,7 +1259,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x3fcf0, 0x3fcfc,
0x40000, 0x4000c,
0x40040, 0x40068,
- 0x40080, 0x40144,
+ 0x4007c, 0x40144,
0x40180, 0x4018c,
0x40200, 0x40298,
0x402ac, 0x4033c,
@@ -1275,7 +1287,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x47800, 0x47814,
0x48000, 0x4800c,
0x48040, 0x48068,
- 0x48080, 0x48144,
+ 0x4807c, 0x48144,
0x48180, 0x4818c,
0x48200, 0x48298,
0x482ac, 0x4833c,
@@ -1309,6 +1321,344 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x51300, 0x51308,
};
+ static const unsigned int t6_reg_ranges[] = {
+ 0x1008, 0x114c,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x1250,
+ 0x1280, 0x133c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x302c,
+ 0x3060, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x58bc,
+ 0x5940, 0x595c,
+ 0x5980, 0x598c,
+ 0x59b0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5ec0,
+ 0x5ec8, 0x5ec8,
+ 0x6000, 0x6040,
+ 0x6058, 0x6154,
+ 0x7700, 0x7798,
+ 0x77c0, 0x7880,
+ 0x78cc, 0x78fc,
+ 0x7b00, 0x7c54,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e84,
+ 0x8ea0, 0x8f88,
+ 0x8fb8, 0x911c,
+ 0x9400, 0x9470,
+ 0x9600, 0x971c,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xf008,
+ 0x11000, 0x11014,
+ 0x11048, 0x11110,
+ 0x11118, 0x1117c,
+ 0x11190, 0x11260,
+ 0x11300, 0x1130c,
+ 0x12000, 0x1205c,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x192b8,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c80,
+ 0x19c94, 0x19cbc,
+ 0x19ce4, 0x19d28,
+ 0x19d50, 0x19d78,
+ 0x19d94, 0x19dc8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fac,
+ 0x19fc4, 0x19fe4,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30070,
+ 0x30100, 0x3015c,
+ 0x30190, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x3088c,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309b8,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30c24,
+ 0x30d00, 0x30d3c,
+ 0x30d44, 0x30d7c,
+ 0x30de0, 0x30de0,
+ 0x30e00, 0x30ed4,
+ 0x30f00, 0x30fa4,
+ 0x30fc0, 0x30fc4,
+ 0x31000, 0x31004,
+ 0x31080, 0x310fc,
+ 0x31208, 0x31220,
+ 0x3123c, 0x31254,
+ 0x31300, 0x31300,
+ 0x31308, 0x3131c,
+ 0x31338, 0x3133c,
+ 0x31380, 0x31380,
+ 0x31388, 0x313a8,
+ 0x313b4, 0x313b4,
+ 0x31400, 0x31420,
+ 0x31438, 0x3143c,
+ 0x31480, 0x31480,
+ 0x314a8, 0x314a8,
+ 0x314b0, 0x314b4,
+ 0x314c8, 0x314d4,
+ 0x31a40, 0x31a4c,
+ 0x31af0, 0x31b20,
+ 0x31b38, 0x31b3c,
+ 0x31b80, 0x31b80,
+ 0x31ba8, 0x31ba8,
+ 0x31bb0, 0x31bb4,
+ 0x31bc8, 0x31bd4,
+ 0x32140, 0x3218c,
+ 0x321f0, 0x32200,
+ 0x32218, 0x32218,
+ 0x32400, 0x32400,
+ 0x32408, 0x3241c,
+ 0x32618, 0x32620,
+ 0x32664, 0x32664,
+ 0x326a8, 0x326a8,
+ 0x326ec, 0x326ec,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b78,
+ 0x32c00, 0x32c00,
+ 0x32c08, 0x32c3c,
+ 0x32e00, 0x32e2c,
+ 0x32f00, 0x32f2c,
+ 0x33000, 0x330ac,
+ 0x330c0, 0x331ac,
+ 0x331c0, 0x332c4,
+ 0x332e4, 0x333c4,
+ 0x333e4, 0x334ac,
+ 0x334c0, 0x335ac,
+ 0x335c0, 0x336c4,
+ 0x336e4, 0x337c4,
+ 0x337e4, 0x337fc,
+ 0x33814, 0x33814,
+ 0x33854, 0x33868,
+ 0x33880, 0x3388c,
+ 0x338c0, 0x338d0,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x339ac,
+ 0x339c0, 0x33ac4,
+ 0x33ae4, 0x33b10,
+ 0x33b24, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c24, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34070,
+ 0x34100, 0x3415c,
+ 0x34190, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x3488c,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349b8,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34c24,
+ 0x34d00, 0x34d3c,
+ 0x34d44, 0x34d7c,
+ 0x34de0, 0x34de0,
+ 0x34e00, 0x34ed4,
+ 0x34f00, 0x34fa4,
+ 0x34fc0, 0x34fc4,
+ 0x35000, 0x35004,
+ 0x35080, 0x350fc,
+ 0x35208, 0x35220,
+ 0x3523c, 0x35254,
+ 0x35300, 0x35300,
+ 0x35308, 0x3531c,
+ 0x35338, 0x3533c,
+ 0x35380, 0x35380,
+ 0x35388, 0x353a8,
+ 0x353b4, 0x353b4,
+ 0x35400, 0x35420,
+ 0x35438, 0x3543c,
+ 0x35480, 0x35480,
+ 0x354a8, 0x354a8,
+ 0x354b0, 0x354b4,
+ 0x354c8, 0x354d4,
+ 0x35a40, 0x35a4c,
+ 0x35af0, 0x35b20,
+ 0x35b38, 0x35b3c,
+ 0x35b80, 0x35b80,
+ 0x35ba8, 0x35ba8,
+ 0x35bb0, 0x35bb4,
+ 0x35bc8, 0x35bd4,
+ 0x36140, 0x3618c,
+ 0x361f0, 0x36200,
+ 0x36218, 0x36218,
+ 0x36400, 0x36400,
+ 0x36408, 0x3641c,
+ 0x36618, 0x36620,
+ 0x36664, 0x36664,
+ 0x366a8, 0x366a8,
+ 0x366ec, 0x366ec,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b78,
+ 0x36c00, 0x36c00,
+ 0x36c08, 0x36c3c,
+ 0x36e00, 0x36e2c,
+ 0x36f00, 0x36f2c,
+ 0x37000, 0x370ac,
+ 0x370c0, 0x371ac,
+ 0x371c0, 0x372c4,
+ 0x372e4, 0x373c4,
+ 0x373e4, 0x374ac,
+ 0x374c0, 0x375ac,
+ 0x375c0, 0x376c4,
+ 0x376e4, 0x377c4,
+ 0x377e4, 0x377fc,
+ 0x37814, 0x37814,
+ 0x37854, 0x37868,
+ 0x37880, 0x3788c,
+ 0x378c0, 0x378d0,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x379ac,
+ 0x379c0, 0x37ac4,
+ 0x37ae4, 0x37b10,
+ 0x37b24, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c24, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x40040, 0x40040,
+ 0x40080, 0x40084,
+ 0x40100, 0x40100,
+ 0x40140, 0x401bc,
+ 0x40200, 0x40214,
+ 0x40228, 0x40228,
+ 0x40240, 0x40258,
+ 0x40280, 0x40280,
+ 0x40304, 0x40304,
+ 0x40330, 0x4033c,
+ 0x41304, 0x413dc,
+ 0x41400, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x4407c,
+ 0x440c0, 0x4427c,
+ 0x442c0, 0x4447c,
+ 0x444c0, 0x4467c,
+ 0x446c0, 0x4487c,
+ 0x448c0, 0x44a7c,
+ 0x44ac0, 0x44c7c,
+ 0x44cc0, 0x44e7c,
+ 0x44ec0, 0x4507c,
+ 0x450c0, 0x451fc,
+ 0x45800, 0x45868,
+ 0x45880, 0x45884,
+ 0x458a0, 0x458b0,
+ 0x45a00, 0x45a68,
+ 0x45a80, 0x45a84,
+ 0x45aa0, 0x45ab0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x4782c,
+ 0x50000, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x510b0,
+ 0x51300, 0x51324,
+ };
+
u32 *buf_end = (u32 *)((char *)buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
@@ -1328,6 +1678,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
break;
+ case CHELSIO_T6:
+ reg_ranges = t6_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+ break;
+
default:
dev_err(adap->pdev_dev,
"Unsupported chip version %d\n", chip_version);
@@ -1374,17 +1729,16 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
}
/**
- * get_vpd_params - read VPD parameters from VPD EEPROM
+ * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
* @adapter: adapter to read
* @p: where to store the parameters
*
* Reads card parameters stored in VPD EEPROM.
*/
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- u32 cclk_param, cclk_val;
- int i, ret, addr;
- int ec, sn, pn;
+ int i, ret = 0, addr;
+ int ec, sn, pn, na;
u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
@@ -1392,6 +1746,9 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (!vpd)
return -ENOMEM;
+ /* Card information normally starts at VPD_BASE but early cards had
+ * it at 0.
+ */
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
if (ret < 0)
goto out;
@@ -1457,6 +1814,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
FIND_VPD_KW(pn, "PN");
+ FIND_VPD_KW(na, "NA");
#undef FIND_VPD_KW
memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -1469,18 +1827,42 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->pn, vpd + pn, min(i, PN_LEN));
strim(p->pn);
+ memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+ strim((char *)p->na);
- /*
- * Ask firmware for the Core Clock since it knows how to translate the
+out:
+ vfree(vpd);
+ return ret;
+}
+
+/**
+ * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
+ * @adapter: adapter to read
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM and retrieves the Core
+ * Clock. This can only be called after a connection to the firmware
+ * is established.
+ */
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+ u32 cclk_param, cclk_val;
+ int ret;
+
+ /* Grab the raw VPD parameters.
+ */
+ ret = t4_get_raw_vpd_params(adapter, p);
+ if (ret)
+ return ret;
+
+ /* Ask firmware for the Core Clock since it knows how to translate the
* Reference Clock ('V2') VPD field into a Core Clock value ...
*/
cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
- ret = t4_query_params(adapter, adapter->mbox, 0, 0,
+ ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
1, &cclk_param, &cclk_val);
-out:
- vfree(vpd);
if (ret)
return ret;
p->cclk = cclk_val;
@@ -1618,7 +2000,7 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
if (ret)
return ret;
if (byte_oriented)
- *data = (__force __u32) (htonl(*data));
+ *data = (__force __u32)(cpu_to_be32(*data));
}
return 0;
}
@@ -1941,7 +2323,8 @@ static bool t4_fw_matches_chip(const struct adapter *adap,
* which will keep us "honest" in the future ...
*/
if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
- (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
+ (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
+ (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
return true;
dev_err(adap->pdev_dev,
@@ -1979,7 +2362,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
"FW image size not multiple of 512 bytes\n");
return -EINVAL;
}
- if (ntohs(hdr->len512) * 512 != size) {
+ if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
dev_err(adap->pdev_dev,
"FW image size differs from size in FW header\n");
return -EINVAL;
@@ -1993,7 +2376,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
return -EINVAL;
for (csum = 0, i = 0; i < size / sizeof(csum); i++)
- csum += ntohl(p[i]);
+ csum += be32_to_cpu(p[i]);
if (csum != 0xffffffff) {
dev_err(adap->pdev_dev,
@@ -2012,7 +2395,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
* first page with a bad version.
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
- ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+ ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;
@@ -2039,6 +2422,147 @@ out:
}
/**
+ * t4_phy_fw_ver - return current PHY firmware version
+ * @adap: the adapter
+ * @phy_fw_ver: return value buffer for PHY firmware version
+ *
+ * Returns the current version of external PHY firmware on the
+ * adapter.
+ */
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
+{
+ u32 param, val;
+ int ret;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+ FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret < 0)
+ return ret;
+ *phy_fw_ver = val;
+ return 0;
+}
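
For reference, a minimal caller could log the version during probe. This is
a sketch only; it assumes a firmware-attached adapter and reuses the
dev_warn()/dev_info() logging style seen elsewhere in this file:

	int phy_fw_ver, ret;

	ret = t4_phy_fw_ver(adap, &phy_fw_ver);
	if (ret < 0)
		dev_warn(adap->pdev_dev, "unable to read PHY FW version\n");
	else
		dev_info(adap->pdev_dev, "PHY FW version %#x\n", phy_fw_ver);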
+
+/**
+ * t4_load_phy_fw - download port PHY firmware
+ * @adap: the adapter
+ * @win: the PCI-E Memory Window index to use for t4_memory_rw()
+ * @win_lock: the lock to use to guard the memory copy
+ * @phy_fw_version: function to check PHY firmware versions
+ * @phy_fw_data: the PHY firmware image to write
+ * @phy_fw_size: image size
+ *
+ * Transfer the specified PHY firmware to the adapter. If a non-NULL
+ * @phy_fw_version is supplied, then it will be used to determine if
+ * it's necessary to perform the transfer by comparing the version
+ * of any existing adapter PHY firmware with that of the passed in
+ * PHY firmware image. If @win_lock is non-NULL then it will be used
+ * around the call to t4_memory_rw() which transfers the PHY firmware
+ * to the adapter.
+ *
+ * A negative error number will be returned if an error occurs. If
+ * version number support is available and there's no need to upgrade
+ * the firmware, 0 will be returned. If firmware is successfully
+ * transferred to the adapter, 1 will be returned.
+ *
+ * NOTE: some adapters only have local RAM to store the PHY firmware. As
+ * a result, a RESET of the adapter would cause that RAM to lose its
+ * contents. Thus, loading PHY firmware on such adapters must happen
+ * after any FW_RESET_CMDs ...
+ */
+int t4_load_phy_fw(struct adapter *adap,
+ int win, spinlock_t *win_lock,
+ int (*phy_fw_version)(const u8 *, size_t),
+ const u8 *phy_fw_data, size_t phy_fw_size)
+{
+ unsigned long mtype = 0, maddr = 0;
+ u32 param, val;
+ int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
+ int ret;
+
+ /* If we have version number support, then check to see if the adapter
+ * already has up-to-date PHY firmware loaded.
+ */
+ if (phy_fw_version) {
+ new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
+ ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+ if (ret < 0)
+ return ret;
+
+ if (cur_phy_fw_ver >= new_phy_fw_vers) {
+ CH_WARN(adap, "PHY Firmware already up-to-date, "
+ "version %#x\n", cur_phy_fw_ver);
+ return 0;
+ }
+ }
+
+ /* Ask the firmware where it wants us to copy the PHY firmware image.
+ * Passing the file size requires a special version of the READ command,
+ * which supplies it via the values field of PARAMS_CMD; the firmware's
+ * reply is then returned in that same buffer.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+ FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+ val = phy_fw_size;
+ ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val, 1);
+ if (ret < 0)
+ return ret;
+ mtype = val >> 8;
+ maddr = (val & 0xff) << 16;
+
+ /* Copy the supplied PHY Firmware image to the adapter memory location
+ * allocated by the adapter firmware.
+ */
+ if (win_lock)
+ spin_lock_bh(win_lock);
+ ret = t4_memory_rw(adap, win, mtype, maddr,
+ phy_fw_size, (__be32 *)phy_fw_data,
+ T4_MEMORY_WRITE);
+ if (win_lock)
+ spin_unlock_bh(win_lock);
+ if (ret)
+ return ret;
+
+ /* Tell the firmware that the PHY firmware image has been written to
+ * RAM and it can now start copying it over to the PHYs. The chip
+ * firmware will RESET the affected PHYs as part of this operation
+ * leaving them running the new PHY firmware image.
+ */
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+ FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+ FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+ ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val, 30000);
+
+ /* If we have version number support, then check to see that the new
+ * firmware got loaded properly.
+ */
+ if (phy_fw_version) {
+ ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+ if (ret < 0)
+ return ret;
+
+ if (cur_phy_fw_ver != new_phy_fw_vers) {
+ CH_WARN(adap, "PHY Firmware did not update: "
+ "version on adapter %#x, "
+ "version flashed %#x\n",
+ cur_phy_fw_ver, new_phy_fw_vers);
+ return -ENXIO;
+ }
+ }
+
+ return 1;
+}
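
A caller typically pairs this with a version-parse callback. The sketch
below is hypothetical: IMAGE_VER_OFFSET, MEMWIN_NIC, adap->win0_lock and
the firmware blob "fw" are assumptions, not part of this patch, and real
PHY images define their own header layout:

	/* Assumed layout: a little-endian u32 version word at
	 * IMAGE_VER_OFFSET inside the PHY firmware image.
	 */
	static int example_phy_fw_version(const u8 *data, size_t size)
	{
		if (size < IMAGE_VER_OFFSET + sizeof(u32))
			return 0;
		return get_unaligned_le32(data + IMAGE_VER_OFFSET);
	}

	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     example_phy_fw_version, fw->data, fw->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY FW transfer failed: %d\n", ret);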
+
+/**
* t4_fwcache - firmware cache operation
* @adap: the adapter
* @op : the operation (flush or flush and invalidate)
@@ -2051,7 +2575,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
c.op_to_vfn =
cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
- FW_PARAMS_CMD_PFN_V(adap->fn) |
+ FW_PARAMS_CMD_PFN_V(adap->pf) |
FW_PARAMS_CMD_VFN_V(0));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
c.param[0].mnem =
@@ -2062,6 +2586,61 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}
+void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
+ unsigned int *pif_req_wrptr,
+ unsigned int *pif_rsp_wrptr)
+{
+ int i, j;
+ u32 cfg, val, req, rsp;
+
+ cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
+ if (cfg & LADBGEN_F)
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
+
+ val = t4_read_reg(adap, CIM_DEBUGSTS_A);
+ req = POLADBGWRPTR_G(val);
+ rsp = PILADBGWRPTR_G(val);
+ if (pif_req_wrptr)
+ *pif_req_wrptr = req;
+ if (pif_rsp_wrptr)
+ *pif_rsp_wrptr = rsp;
+
+ for (i = 0; i < CIM_PIFLA_SIZE; i++) {
+ for (j = 0; j < 6; j++) {
+ t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
+ PILADBGRDPTR_V(rsp));
+ *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
+ *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
+ req++;
+ rsp++;
+ }
+ req = (req + 2) & POLADBGRDPTR_M;
+ rsp = (rsp + 2) & PILADBGRDPTR_M;
+ }
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
+}
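
Callers size their buffers from the loop structure above: each of the
CIM_PIFLA_SIZE rows yields six request and six response words. A minimal
sketch (req_wr/rsp_wr receive the returned write pointers):

	u32 pif_req[CIM_PIFLA_SIZE * 6], pif_rsp[CIM_PIFLA_SIZE * 6];
	unsigned int req_wr, rsp_wr;

	t4_cim_read_pif_la(adap, pif_req, pif_rsp, &req_wr, &rsp_wr);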
+
+void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
+{
+ u32 cfg;
+ int i, j, idx;
+
+ cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
+ if (cfg & LADBGEN_F)
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
+
+ for (i = 0; i < CIM_MALA_SIZE; i++) {
+ for (j = 0; j < 5; j++) {
+ idx = 8 * i + j;
+ t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
+ PILADBGRDPTR_V(idx));
+ *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
+ *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
+ }
+ }
+ t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
+}
+
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
unsigned int i, j;
@@ -2082,7 +2661,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
FW_PORT_CAP_ANEG)
/**
- * t4_link_start - apply link configuration to MAC/PHY
+ * t4_link_l1cfg - apply link configuration to MAC/PHY
* @phy: the PHY to setup
* @mac: the MAC to setup
* @lc: the requested link configuration
@@ -2094,7 +2673,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
* - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
* otherwise do it later based on the outcome of auto-negotiation.
*/
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
struct fw_port_cmd c;
@@ -2107,19 +2686,22 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
fc |= FW_PORT_CAP_FC_TX;
memset(&c, 0, sizeof(c));
- c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
- c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
- FW_LEN16(c));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 =
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
- c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+ c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
+ fc);
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
} else if (lc->autoneg == AUTONEG_DISABLE) {
- c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
} else
- c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+ c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -2137,11 +2719,13 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
struct fw_port_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
- c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
- FW_LEN16(c));
- c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 =
+ cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
+ c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -2335,6 +2919,7 @@ static void tp_intr_handler(struct adapter *adapter)
static void sge_intr_handler(struct adapter *adapter)
{
u64 v;
+ u32 err;
static const struct intr_info sge_intr_info[] = {
{ ERR_CPL_EXCEED_IQE_SIZE_F,
@@ -2343,8 +2928,6 @@ static void sge_intr_handler(struct adapter *adapter)
"SGE GTS CIDX increment too large", -1, 0 },
{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
- { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
- { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
@@ -2357,13 +2940,19 @@ static void sge_intr_handler(struct adapter *adapter)
0 },
{ ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 },
- { ERR_EGR_CTXT_PRIO_F,
- "SGE too many priority egress contexts", -1, 0 },
{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0 }
};
+ static struct intr_info t4t5_sge_intr_info[] = {
+ { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
+ { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
+ { ERR_EGR_CTXT_PRIO_F,
+ "SGE too many priority egress contexts", -1, 0 },
+ { 0 }
+ };
+
v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
if (v) {
@@ -2373,8 +2962,23 @@ static void sge_intr_handler(struct adapter *adapter)
t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
}
- if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
- v != 0)
+ v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
+ t4t5_sge_intr_info);
+
+ err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
+ if (err & ERROR_QID_VALID_F) {
+ dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
+ ERROR_QID_G(err));
+ if (err & UNCAPTURED_ERROR_F)
+ dev_err(adapter->pdev_dev,
+ "SGE UNCAPTURED_ERROR set (clearing)\n");
+ t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
+ UNCAPTURED_ERROR_F);
+ }
+
+ if (v != 0)
t4_fatal_err(adapter);
}
@@ -2547,6 +3151,7 @@ static void cplsw_intr_handler(struct adapter *adapter)
*/
static void le_intr_handler(struct adapter *adap)
{
+ enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
static const struct intr_info le_intr_info[] = {
{ LIPMISS_F, "LE LIP miss", -1, 0 },
{ LIP0_F, "LE 0 LIP error", -1, 0 },
@@ -2556,7 +3161,18 @@ static void le_intr_handler(struct adapter *adap)
{ 0 }
};
- if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
+ static struct intr_info t6_le_intr_info[] = {
+ { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
+ { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+ { TCAMINTPERR_F, "LE parity error", -1, 1 },
+ { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+ { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
+ (chip <= CHELSIO_T5) ?
+ le_intr_info : t6_le_intr_info))
t4_fatal_err(adap);
}
@@ -2825,7 +3441,7 @@ int t4_slow_intr_handler(struct adapter *adapter)
pcie_intr_handler(adapter);
if (cause & MC_F)
mem_intr_handler(adapter, MEM_MC);
- if (!is_t4(adapter->params.chip) && (cause & MC1_S))
+ if (is_t5(adapter->params.chip) && (cause & MC1_F))
mem_intr_handler(adapter, MEM_MC1);
if (cause & EDC0_F)
mem_intr_handler(adapter, MEM_EDC0);
@@ -2871,17 +3487,18 @@ int t4_slow_intr_handler(struct adapter *adapter)
*/
void t4_intr_enable(struct adapter *adapter)
{
+ u32 val = 0;
u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
- ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
+ ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
- ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
- EGRESS_SIZE_ERR_F);
+ DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
@@ -2945,18 +3562,18 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
struct fw_rss_ind_tbl_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
FW_RSS_IND_TBL_CMD_VIID_V(viid));
- cmd.retval_len16 = htonl(FW_LEN16(cmd));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
while (n > 0) {
int nq = min(n, 32);
__be32 *qp = &cmd.iq0_to_iq2;
- cmd.niqid = htons(nq);
- cmd.startidx = htons(start);
+ cmd.niqid = cpu_to_be16(nq);
+ cmd.startidx = cpu_to_be16(start);
start += nq;
n -= nq;
@@ -2974,7 +3591,7 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
if (++rsp >= rsp_end)
rsp = rspq;
- *qp++ = htonl(v);
+ *qp++ = cpu_to_be32(v);
nq -= 3;
}
@@ -3000,20 +3617,46 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
struct fw_rss_glb_config_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
- c.retval_len16 = htonl(FW_LEN16(c));
+ c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
- c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+ c.u.manual.mode_pkd =
+ cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
c.u.basicvirtual.mode_pkd =
- htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
- c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+ cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+ c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
} else
return -EINVAL;
return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
+/**
+ * t4_config_vi_rss - configure per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: RSS flags
+ * @defq: id of the default RSS queue for the VI.
+ *
+ * Configures VI-specific RSS properties.
+ */
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq)
+{
+ struct fw_rss_vi_config_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
+ FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
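
As a usage sketch, a driver might enable UDP hashing on a VI and point its
default queue at the first ingress queue. The flag name below comes from
fw_rss_vi_config_cmd, and viid/rss_iq0 are assumed to be in scope:

	ret = t4_config_vi_rss(adap, adap->mbox, viid,
			       FW_RSS_VI_CONFIG_CMD_UDPEN_F, rss_iq0);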
+
/* Read an RSS table row */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
@@ -3045,6 +3688,40 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
}
/**
+ * t4_fw_tp_pio_rw - Access TP PIO through LDST
+ * @adap: the adapter
+ * @vals: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_index: index of the first indirect register to read/write
+ * @rw: Read (1) or Write (0)
+ *
+ * Access TP PIO registers through LDST
+ */
+static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw)
+{
+ int ret, i;
+ int cmd = FW_LDST_ADDRSPC_TP_PIO;
+ struct fw_ldst_cmd c;
+
+ for (i = 0 ; i < nregs; i++) {
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ (rw ? FW_CMD_READ_F :
+ FW_CMD_WRITE_F) |
+ FW_LDST_CMD_ADDRSPACE_V(cmd));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+ c.u.addrval.addr = cpu_to_be32(start_index + i);
+ c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (!ret && rw)
+ vals[i] = be32_to_cpu(c.u.addrval.val);
+ }
+}
+
+/**
* t4_read_rss_key - read the global RSS key
* @adap: the adapter
* @key: 10-entry array holding the 320-bit RSS key
@@ -3053,8 +3730,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
*/
void t4_read_rss_key(struct adapter *adap, u32 *key)
{
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
- TP_RSS_SECRET_KEY0_A);
+ if (adap->flags & FW_OK)
+ t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
+ else
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+ TP_RSS_SECRET_KEY0_A);
}
/**
@@ -3069,11 +3749,32 @@ void t4_read_rss_key(struct adapter *adap, u32 *key)
*/
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
{
- t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
- TP_RSS_SECRET_KEY0_A);
- if (idx >= 0 && idx < 16)
- t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
- KEYWRADDR_V(idx) | KEYWREN_F);
+ u8 rss_key_addr_cnt = 16;
+ u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
+
+ /* T6 and later: KeyMode 3 (per-VF and per-VF scramble) allows
+ * access to key addresses 16-63 by using KeyWrAddrX as the upper
+ * two bits (index[5:4]) of the key table index.
+ */
+ if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+ (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
+ rss_key_addr_cnt = 32;
+
+ if (adap->flags & FW_OK)
+ t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
+ else
+ t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+ TP_RSS_SECRET_KEY0_A);
+
+ if (idx >= 0 && idx < rss_key_addr_cnt) {
+ if (rss_key_addr_cnt > 16)
+ t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+ KEYWRADDRX_V(idx >> 4) |
+ T6_VFWRADDR_V(idx) | KEYWREN_F);
+ else
+ t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+ KEYWRADDR_V(idx) | KEYWREN_F);
+ }
}
/**
@@ -3088,8 +3789,12 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
u32 *valp)
{
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- valp, 1, TP_RSS_PF0_CONFIG_A + index);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, valp, 1,
+ TP_RSS_PF0_CONFIG_A + index, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ valp, 1, TP_RSS_PF0_CONFIG_A + index);
}
/**
@@ -3107,8 +3812,13 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
{
u32 vrt, mask, data;
- mask = VFWRADDR_V(VFWRADDR_M);
- data = VFWRADDR_V(index);
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+ mask = VFWRADDR_V(VFWRADDR_M);
+ data = VFWRADDR_V(index);
+ } else {
+ mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
+ data = T6_VFWRADDR_V(index);
+ }
/* Request that the index'th VF Table values be read into VFL/VFH.
*/
@@ -3119,10 +3829,15 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
/* Grab the VFL/VFH values ...
*/
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- vfl, 1, TP_RSS_VFL_CONFIG_A);
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- vfh, 1, TP_RSS_VFH_CONFIG_A);
+ if (adapter->flags & FW_OK) {
+ t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
+ t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
+ } else {
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ vfl, 1, TP_RSS_VFL_CONFIG_A);
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ vfh, 1, TP_RSS_VFH_CONFIG_A);
+ }
}
/**
@@ -3135,8 +3850,11 @@ u32 t4_read_rss_pf_map(struct adapter *adapter)
{
u32 pfmap;
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &pfmap, 1, TP_RSS_PF_MAP_A);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &pfmap, 1, TP_RSS_PF_MAP_A);
return pfmap;
}
@@ -3150,8 +3868,11 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
{
u32 pfmask;
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &pfmask, 1, TP_RSS_PF_MSK_A);
+ if (adapter->flags & FW_OK)
+ t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
+ else
+ t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &pfmask, 1, TP_RSS_PF_MSK_A);
return pfmask;
}
@@ -3176,18 +3897,18 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
if (v4) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
- v4->tcpOutRsts = STAT(OUT_RST);
- v4->tcpInSegs = STAT64(IN_SEG);
- v4->tcpOutSegs = STAT64(OUT_SEG);
- v4->tcpRetransSegs = STAT64(RXT_SEG);
+ v4->tcp_out_rsts = STAT(OUT_RST);
+ v4->tcp_in_segs = STAT64(IN_SEG);
+ v4->tcp_out_segs = STAT64(OUT_SEG);
+ v4->tcp_retrans_segs = STAT64(RXT_SEG);
}
if (v6) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
- v6->tcpOutRsts = STAT(OUT_RST);
- v6->tcpInSegs = STAT64(IN_SEG);
- v6->tcpOutSegs = STAT64(OUT_SEG);
- v6->tcpRetransSegs = STAT64(RXT_SEG);
+ v6->tcp_out_rsts = STAT(OUT_RST);
+ v6->tcp_in_segs = STAT64(IN_SEG);
+ v6->tcp_out_segs = STAT64(OUT_SEG);
+ v6->tcp_retrans_segs = STAT64(RXT_SEG);
}
#undef STAT64
#undef STAT
@@ -3195,6 +3916,130 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
}
/**
+ * t4_tp_get_err_stats - read TP's error MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's error counters.
+ */
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+{
+ /* T4/T5 have 4 channels (nchan == NCHAN); T6 and later have 2 */
+ if (adap->params.arch.nchan == NCHAN) {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_cong_drops, 8,
+ TP_MIB_TNL_CNG_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_tx_drops, 4,
+ TP_MIB_TNL_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_vlan_drops, 4,
+ TP_MIB_OFD_VLN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp6_in_errs, 4,
+ TP_MIB_TCP_V6IN_ERR_0_A);
+ } else {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_cong_drops, 2,
+ TP_MIB_TNL_CNG_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_chan_drops, 2,
+ TP_MIB_OFD_CHN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_vlan_drops, 2,
+ TP_MIB_OFD_VLN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
+ }
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
+}
+
+/**
+ * t4_tp_get_cpl_stats - read TP's CPL MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's CPL counters.
+ */
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
+{
+ /* T4/T5 have 4 channels (nchan == NCHAN); T6 and later have 2 */
+ if (adap->params.arch.nchan == NCHAN) {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+ 8, TP_MIB_CPL_IN_REQ_0_A);
+ } else {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+ 2, TP_MIB_CPL_IN_REQ_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+ 2, TP_MIB_CPL_OUT_RSP_0_A);
+ }
+}
+
+/**
+ * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's RDMA counters.
+ */
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
+{
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
+ 2, TP_MIB_RQE_DFR_PKT_A);
+}
+
+/**
+ * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
+ * @adap: the adapter
+ * @idx: the port index
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's FCoE counters for the selected port.
+ */
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+ struct tp_fcoe_stats *st)
+{
+ u32 val[2];
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
+ 1, TP_MIB_FCOE_DDP_0_A + idx);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
+ 1, TP_MIB_FCOE_DROP_0_A + idx);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+ 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
+ st->octets_ddp = ((u64)val[0] << 32) | val[1];
+}
+
+/**
+ * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's counters for non-TCP directly-placed packets.
+ */
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
+{
+ u32 val[4];
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
+ TP_MIB_USM_PKTS_A);
+ st->frames = val[0];
+ st->drops = val[1];
+ st->octets = ((u64)val[2] << 32) | val[3];
+}
+
+/**
* t4_read_mtu_tbl - returns the values in the HW path MTU table
* @adap: the adapter
* @mtus: where to store the MTU values
@@ -3346,6 +4191,52 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
}
}
+/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
+ * clocks. The formula is
+ *
+ * bytes/s = bytes256 * 256 * ClkFreq / 4096
+ *
+ * which, with the core clock expressed in kHz (cycles per millisecond, as
+ * stored in adap->params.vpd.cclk), is equivalent to
+ *
+ * bytes/s = 62.5 * bytes256 * ClkFreq_kHz
+ */
+static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
+{
+ u64 v = bytes256 * adap->params.vpd.cclk;
+
+ return v * 62 + v / 2;
+}
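
As a worked example of the comment above: with bytes256 = 1000 and a
250 MHz core clock (cclk = 250000 kHz), v = 1000 * 250000 = 250,000,000,
and the function returns 250000000 * 62 + 250000000 / 2 = 15,625,000,000
bytes/s, i.e. exactly 62.5 * v. The split into "v * 62 + v / 2" simply
avoids floating point in the kernel.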
+
+/**
+ * t4_get_chan_txrate - get the current per channel Tx rates
+ * @adap: the adapter
+ * @nic_rate: rates for NIC traffic
+ * @ofld_rate: rates for offloaded traffic
+ *
+ * Return the current Tx rates in bytes/s for NIC and offloaded traffic
+ * for each channel.
+ */
+void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
+{
+ u32 v;
+
+ v = t4_read_reg(adap, TP_TX_TRATE_A);
+ nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
+ nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
+ if (adap->params.arch.nchan == NCHAN) {
+ nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
+ nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
+ }
+
+ v = t4_read_reg(adap, TP_TX_ORATE_A);
+ ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
+ ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
+ if (adap->params.arch.nchan == NCHAN) {
+ ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
+ ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
+ }
+}
+
/**
* t4_pmtx_get_stats - returns the HW stats from PMTX
* @adap: the adapter
@@ -3401,7 +4292,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
}
/**
- * get_mps_bg_map - return the buffer groups associated with a port
+ * t4_get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
* @idx: the port index
*
@@ -3409,7 +4300,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
* with the given port. Bit i is set if buffer group i is used by the
* port.
*/
-static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
+unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
@@ -3451,6 +4342,28 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
}
/**
+ * t4_get_port_stats_offset - collect port stats relative to a previous
+ * snapshot
+ * @adap: The adapter
+ * @idx: The port
+ * @stats: Current stats to fill
+ * @offset: Previous stats snapshot
+ */
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset)
+{
+ u64 *s, *o;
+ int i;
+
+ t4_get_port_stats(adap, idx, stats);
+ for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
+ i < (sizeof(struct port_stats) / sizeof(u64));
+ i++, s++, o++)
+ *s -= *o;
+}
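
A sketch of the intended delta pattern (hypothetical caller; the
stats_base snapshot field on the port structure is an assumption):

	struct port_stats stats;

	/* stats comes back holding only the counts accumulated since the
	 * stats_base snapshot was taken.
	 */
	t4_get_port_stats_offset(adap, pi->port_id, &stats, &pi->stats_base);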
+
+/**
* t4_get_port_stats - collect port statistics
* @adap: the adapter
* @idx: the port index
@@ -3460,7 +4373,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
*/
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
- u32 bgmap = get_mps_bg_map(adap, idx);
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
#define GET_STAT(name) \
t4_read_reg64(adap, \
@@ -3534,103 +4447,51 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
}
/**
- * t4_wol_magic_enable - enable/disable magic packet WoL
- * @adap: the adapter
- * @port: the physical port index
- * @addr: MAC address expected in magic packets, %NULL to disable
- *
- * Enables/disables magic packet wake-on-LAN for the selected port.
- */
-void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
- const u8 *addr)
-{
- u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
-
- if (is_t4(adap->params.chip)) {
- mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
- mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
- } else {
- mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
- mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
- }
-
- if (addr) {
- t4_write_reg(adap, mag_id_reg_l,
- (addr[2] << 24) | (addr[3] << 16) |
- (addr[4] << 8) | addr[5]);
- t4_write_reg(adap, mag_id_reg_h,
- (addr[0] << 8) | addr[1]);
- }
- t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
- addr ? MAGICEN_F : 0);
-}
-
-/**
- * t4_wol_pat_enable - enable/disable pattern-based WoL
+ * t4_get_lb_stats - collect loopback port statistics
* @adap: the adapter
- * @port: the physical port index
- * @map: bitmap of which HW pattern filters to set
- * @mask0: byte mask for bytes 0-63 of a packet
- * @mask1: byte mask for bytes 64-127 of a packet
- * @crc: Ethernet CRC for selected bytes
- * @enable: enable/disable switch
+ * @idx: the loopback port index
+ * @p: the stats structure to fill
*
- * Sets the pattern filters indicated in @map to mask out the bytes
- * specified in @mask0/@mask1 in received packets and compare the CRC of
- * the resulting packet against @crc. If @enable is %true pattern-based
- * WoL is enabled, otherwise disabled.
+ * Return HW statistics for the given loopback port.
*/
-int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
- u64 mask0, u64 mask1, unsigned int crc, bool enable)
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
- int i;
- u32 port_cfg_reg;
-
- if (is_t4(adap->params.chip))
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
- else
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
- if (!enable) {
- t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
- return 0;
- }
- if (map > 0xff)
- return -EINVAL;
-
-#define EPIO_REG(name) \
+#define GET_STAT(name) \
+ t4_read_reg64(adap, \
(is_t4(adap->params.chip) ? \
- PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
- T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
+ PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
+ T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
- t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
- t4_write_reg(adap, EPIO_REG(DATA2), mask1);
- t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
+ p->octets = GET_STAT(BYTES);
+ p->frames = GET_STAT(FRAMES);
+ p->bcast_frames = GET_STAT(BCAST);
+ p->mcast_frames = GET_STAT(MCAST);
+ p->ucast_frames = GET_STAT(UCAST);
+ p->error_frames = GET_STAT(ERROR);
+
+ p->frames_64 = GET_STAT(64B);
+ p->frames_65_127 = GET_STAT(65B_127B);
+ p->frames_128_255 = GET_STAT(128B_255B);
+ p->frames_256_511 = GET_STAT(256B_511B);
+ p->frames_512_1023 = GET_STAT(512B_1023B);
+ p->frames_1024_1518 = GET_STAT(1024B_1518B);
+ p->frames_1519_max = GET_STAT(1519B_MAX);
+ p->drop = GET_STAT(DROP_FRAMES);
+
+ p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
+ p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
+ p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
+ p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
+ p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
+ p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
+ p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
+ p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
- for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
- if (!(map & 1))
- continue;
-
- /* write byte masks */
- t4_write_reg(adap, EPIO_REG(DATA0), mask0);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
- t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
- return -ETIMEDOUT;
-
- /* write CRC */
- t4_write_reg(adap, EPIO_REG(DATA0), crc);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
- t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
- return -ETIMEDOUT;
- }
-#undef EPIO_REG
-
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
- return 0;
+#undef GET_STAT
+#undef GET_STAT_COM
}
/* t4_mk_filtdelwr - create a delete filter WR
@@ -3644,33 +4505,38 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
memset(wr, 0, sizeof(*wr));
- wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
- wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
- wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
- FW_FILTER_WR_NOREPLY_V(qid < 0));
- wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
+ wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
+ wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
+ wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
+ FW_FILTER_WR_NOREPLY_V(qid < 0));
+ wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
if (qid >= 0)
- wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
+ wr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
}
#define INIT_CMD(var, cmd, rd_wr) do { \
- (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
- FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
- (var).retval_len16 = htonl(FW_LEN16(var)); \
+ (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
+ FW_CMD_REQUEST_F | \
+ FW_CMD_##rd_wr##_F); \
+ (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val)
{
+ u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F |
- FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
- c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.addrval.addr = htonl(addr);
- c.u.addrval.val = htonl(val);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.addrval.addr = cpu_to_be32(addr);
+ c.u.addrval.val = cpu_to_be32(val);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3690,19 +4556,22 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 *valp)
{
int ret;
+ u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
- c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
- FW_LDST_CMD_MMD_V(mmd));
- c.u.mdio.raddr = htons(reg);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+ FW_LDST_CMD_MMD_V(mmd));
+ c.u.mdio.raddr = cpu_to_be16(reg);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0)
- *valp = ntohs(c.u.mdio.rval);
+ *valp = be16_to_cpu(c.u.mdio.rval);
return ret;
}
@@ -3720,16 +4589,19 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 val)
{
+ u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
- c.cycles_to_len16 = htonl(FW_LEN16(c));
- c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
- FW_LDST_CMD_MMD_V(mmd));
- c.u.mdio.raddr = htons(reg);
- c.u.mdio.rval = htons(val);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+ FW_LDST_CMD_MMD_V(mmd));
+ c.u.mdio.raddr = cpu_to_be16(reg);
+ c.u.mdio.rval = cpu_to_be16(val);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -3841,6 +4713,32 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
}
/**
+ * t4_sge_ctxt_flush - flush the SGE context cache
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a FW command through the given mailbox to flush the
+ * SGE context cache.
+ */
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+{
+ int ret;
+ u32 ldst_addrspace;
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ ldst_addrspace);
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ return ret;
+}
+
+/**
* t4_fw_hello - establish communication with FW
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -3863,11 +4761,11 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
retry:
memset(&c, 0, sizeof(c));
INIT_CMD(c, HELLO, WRITE);
- c.err_to_clearinit = htonl(
+ c.err_to_clearinit = cpu_to_be32(
FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
- FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
- FW_HELLO_CMD_MBMASTER_M) |
+ FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
+ mbox : FW_HELLO_CMD_MBMASTER_M) |
FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
FW_HELLO_CMD_CLEARINIT_F);
@@ -3888,7 +4786,7 @@ retry:
return ret;
}
- v = ntohl(c.err_to_clearinit);
+ v = be32_to_cpu(c.err_to_clearinit);
master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
if (state) {
if (v & FW_HELLO_CMD_ERR_F)
@@ -4017,7 +4915,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
- c.val = htonl(reset);
+ c.val = cpu_to_be32(reset);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4050,8 +4948,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
memset(&c, 0, sizeof(c));
INIT_CMD(c, RESET, WRITE);
- c.val = htonl(PIORST_F | PIORSTMODE_F);
- c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
+ c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
+ c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4190,7 +5088,7 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
* the newly loaded firmware will handle this right by checking
* its header flags to see if it advertises the capability.
*/
- reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+ reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
return t4_fw_restart(adap, mbox, reset);
}
@@ -4321,7 +5219,7 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
}
/**
- * t4_query_params - query FW or device parameters
+ * t4_query_params_rw - query FW or device parameters
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF
@@ -4329,13 +5227,14 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
* @nparams: the number of parameters
* @params: the parameter names
* @val: the parameter values
+ * @rw: Write and read flag
*
* Reads the value of FW or device parameters. Up to 7 parameters can be
* queried at once.
*/
-int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
- unsigned int vf, unsigned int nparams, const u32 *params,
- u32 *val)
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val, int rw)
{
int i, ret;
struct fw_params_cmd c;
@@ -4345,22 +5244,35 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
return -EINVAL;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
- FW_PARAMS_CMD_VFN_V(vf));
- c.retval_len16 = htonl(FW_LEN16(c));
- for (i = 0; i < nparams; i++, p += 2)
- *p = htonl(*params++);
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+ for (i = 0; i < nparams; i++) {
+ *p++ = cpu_to_be32(*params++);
+ if (rw)
+ *p = cpu_to_be32(*(val + i));
+ p++;
+ }
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0)
for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
- *val++ = ntohl(*p);
+ *val++ = be32_to_cpu(*p);
return ret;
}
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val)
+{
+ return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+}
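
A minimal read-only query through the wrapper, mirroring the Core Clock
lookup earlier in this patch (FW_PARAMS_PARAM_DEV_PORTVEC is a standard
device parameter; treat the exact mnemonic as an assumption):

	u32 param, val;

	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC));
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret == 0)
		adap->params.portvec = val;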
+
/**
- * t4_set_params_nosleep - sets FW or device parameters
+ * t4_set_params_timeout - sets FW or device parameters
* @adap: the adapter
* @mbox: mailbox to use for the FW command
* @pf: the PF
@@ -4368,15 +5280,15 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
* @nparams: the number of parameters
* @params: the parameter names
* @val: the parameter values
+ * @timeout: the timeout time
*
- * Does not ever sleep
* Sets the value of FW or device parameters. Up to 7 parameters can be
* specified at once.
*/
-int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int nparams, const u32 *params,
- const u32 *val)
+ const u32 *val, int timeout)
{
struct fw_params_cmd c;
__be32 *p = &c.param[0].mnem;
@@ -4386,9 +5298,9 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
- FW_PARAMS_CMD_PFN_V(pf) |
- FW_PARAMS_CMD_VFN_V(vf));
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_PARAMS_CMD_PFN_V(pf) |
+ FW_PARAMS_CMD_VFN_V(vf));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
while (nparams--) {
@@ -4396,7 +5308,7 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
*p++ = cpu_to_be32(*val++);
}
- return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+ return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}
/**
@@ -4416,23 +5328,8 @@ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
const u32 *val)
{
- struct fw_params_cmd c;
- __be32 *p = &c.param[0].mnem;
-
- if (nparams > 7)
- return -EINVAL;
-
- memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
- FW_PARAMS_CMD_VFN_V(vf));
- c.retval_len16 = htonl(FW_LEN16(c));
- while (nparams--) {
- *p++ = htonl(*params++);
- *p++ = htonl(*val++);
- }
-
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
+ FW_CMD_MAX_TIMEOUT);
}
/**
@@ -4465,20 +5362,21 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_pfvf_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
- FW_PFVF_CMD_VFN_V(vf));
- c.retval_len16 = htonl(FW_LEN16(c));
- c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
- FW_PFVF_CMD_NIQ_V(rxq));
- c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
- FW_PFVF_CMD_PMASK_V(pmask) |
- FW_PFVF_CMD_NEQ_V(txq));
- c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
- FW_PFVF_CMD_NEXACTF_V(nexact));
- c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
- FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
- FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
+ FW_PFVF_CMD_VFN_V(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
+ FW_PFVF_CMD_NIQ_V(rxq));
+ c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
+ FW_PFVF_CMD_PMASK_V(pmask) |
+ FW_PFVF_CMD_NEQ_V(txq));
+ c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
+ FW_PFVF_CMD_NVI_V(vi) |
+ FW_PFVF_CMD_NEXACTF_V(nexact));
+ c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
+ FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4507,10 +5405,10 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
struct fw_vi_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+ FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
c.portid_pkd = FW_VI_CMD_PORTID_V(port);
c.nmac = nmac - 1;
@@ -4532,8 +5430,35 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
}
if (rss_size)
- *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd));
- return FW_VI_CMD_VIID_G(ntohs(c.type_viid));
+ *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
+ return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int viid)
+{
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F |
+ FW_VI_CMD_PFN_V(pf) |
+ FW_VI_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
+ c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}
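
Tear-down is the mirror image of t4_alloc_vi(): pass back the viid the
allocation returned, using the owning PF/VF. A sketch:

	ret = t4_free_vi(adap, adap->mbox, adap->pf, 0, viid);
	if (ret)
		dev_err(adap->pdev_dev, "failed to free VI %u: %d\n",
			viid, ret);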
/**
@@ -4569,14 +5494,16 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid));
- c.retval_len16 = htonl(FW_LEN16(c));
- c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) |
- FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
- FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
- FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
- FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_RXMODE_CMD_VIID_V(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.mtu_to_vlanexen =
+ cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
+ FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
+ FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
+ FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
+ FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
@@ -4606,43 +5533,71 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
- int i, ret;
+ int offset, ret = 0;
struct fw_vi_mac_cmd c;
- struct fw_vi_mac_exact *p;
- unsigned int max_naddr = is_t4(adap->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int nfilters = 0;
+ unsigned int max_naddr = adap->params.arch.mps_tcam_size;
+ unsigned int rem = naddr;
- if (naddr > 7)
+ if (naddr > max_naddr)
return -EINVAL;
- memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
- FW_VI_MAC_CMD_VIID_V(viid));
- c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) |
- FW_CMD_LEN16_V((naddr + 2) / 2));
-
- for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
- FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
- memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
- }
+ for (offset = 0; offset < naddr; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
+ rem : ARRAY_SIZE(c.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
- ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
- if (ret)
- return ret;
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(free) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 =
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
+ FW_CMD_LEN16_V(len16));
+
+ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+ p->valid_to_idx =
+ cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(
+ FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[offset + i],
+ sizeof(p->macaddr));
+ }
- for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
+ /* It's okay if we run out of space in our MAC address arena.
+ * Some of the addresses we submit may still get stored, so we
+ * need to run through the reply to see what the results were ...
+ */
+ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret && ret != -FW_ENOMEM)
+ break;
- if (idx)
- idx[i] = index >= max_naddr ? 0xffff : index;
- if (index < max_naddr)
- ret++;
- else if (hash)
- *hash |= (1ULL << hash_mac_addr(addr[i]));
+ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_G(
+ be16_to_cpu(p->valid_to_idx));
+
+ if (idx)
+ idx[offset + i] = (index >= max_naddr ?
+ 0xffff : index);
+ if (index < max_naddr)
+ nfilters++;
+ else if (hash)
+ *hash |= (1ULL <<
+ hash_mac_addr(addr[offset + i]));
+ }
+
+ free = false;
+ offset += fw_naddr;
+ rem -= fw_naddr;
}
+
+ if (ret == 0 || ret == -FW_ENOMEM)
+ ret = nfilters;
return ret;
}
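
With the chunking above, a single call can now program an arbitrary list
of addresses; a sketch (addrs[] and naddr assumed in scope):

	u64 mhash = 0;
	int nfilt;

	/* On success nfilt is the number of addresses that landed in
	 * exact-match TCAM slots; the remainder were folded into mhash.
	 */
	nfilt = t4_alloc_mac_filt(adap, adap->mbox, viid, false, naddr,
				  addrs, NULL, &mhash, true);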
@@ -4671,26 +5626,25 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
- unsigned int max_mac_addr = is_t4(adap->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid));
- c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
- p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
- FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
- FW_VI_MAC_CMD_IDX_V(idx));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
+ p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
+ FW_VI_MAC_CMD_IDX_V(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0) {
- ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
+ ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
if (ret >= max_mac_addr)
ret = -ENOMEM;
}
@@ -4714,11 +5668,12 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
struct fw_vi_mac_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid));
- c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F |
- FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
- FW_CMD_LEN16_V(1));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
+ FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
+ FW_CMD_LEN16_V(1));
c.u.hash.hashvec = cpu_to_be64(vec);
return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
@@ -4741,12 +5696,13 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
-
- c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
- FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) |
- FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
+ FW_VI_ENABLE_CMD_EEN_V(tx_en) |
+ FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
+ FW_LEN16(c));
return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4781,10 +5737,11 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
struct fw_vi_enable_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
- c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
- c.blinkdur = htons(nblinks);
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_VI_ENABLE_CMD_VIID_V(viid));
+ c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
+ c.blinkdur = cpu_to_be16(nblinks);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4808,14 +5765,14 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_iq_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
- FW_IQ_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
- c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
- c.iqid = htons(iqid);
- c.fl0id = htons(fl0id);
- c.fl1id = htons(fl1id);
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+ FW_IQ_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
+ c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
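
A large share of this patch is mechanical: htonl()/htons() become
cpu_to_be32()/cpu_to_be16() when filling firmware command structures. Both
families produce identical bytes, but the cpu_to_be*() forms return the
sparse-annotated __be32/__be16 types that match the struct fields, so
endianness mismatches show up under `make C=1`. A minimal sketch, assuming
the kernel byteorder helpers (struct demo_cmd is hypothetical):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct demo_cmd {		/* hypothetical command layout */
		__be32 op_to_viid;
		__be16 len16;
	};

	static void fill_demo_cmd(struct demo_cmd *c, u32 op, u16 len16)
	{
		/* cpu_to_be32()/cpu_to_be16() return __be32/__be16, matching
		 * the field types; htonl()/htons() return plain integers and
		 * defeat sparse's endianness checking.
		 */
		c->op_to_viid = cpu_to_be32(op);
		c->len16 = cpu_to_be16(len16);
	}
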
@@ -4835,11 +5792,12 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_eth_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
- FW_EQ_ETH_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
- c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_ETH_CMD_PFN_V(pf) |
+ FW_EQ_ETH_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
+ c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4859,11 +5817,12 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_ctrl_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
- FW_EQ_CTRL_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
- c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_CTRL_CMD_PFN_V(pf) |
+ FW_EQ_CTRL_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
+ c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4883,11 +5842,12 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
struct fw_eq_ofld_cmd c;
memset(&c, 0, sizeof(c));
- c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
- FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
- FW_EQ_OFLD_CMD_VFN_V(vf));
- c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
- c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+ FW_EQ_OFLD_CMD_PFN_V(pf) |
+ FW_EQ_OFLD_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
+ c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4905,11 +5865,11 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
if (opcode == FW_PORT_CMD) { /* link/module state change message */
int speed = 0, fc = 0;
const struct fw_port_cmd *p = (void *)rpl;
- int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid));
+ int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
int port = adap->chan_map[chan];
struct port_info *pi = adap2pinfo(adap, port);
struct link_config *lc = &pi->link_cfg;
- u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+ u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
@@ -5043,6 +6003,22 @@ static int get_flash_params(struct adapter *adap)
return 0;
}
+static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
+{
+ u16 val;
+ u32 pcie_cap;
+
+ pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ pci_read_config_word(adapter->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, &val);
+ val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
+ val |= range;
+ pci_write_config_word(adapter->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, val);
+ }
+}
+
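
set_pcie_completion_timeout() above is a read-modify-write of the PCIe Device
Control 2 register: PCI_EXP_DEVCTL2_COMP_TIMEOUT masks out the old range bits
and the caller's range is OR'd in (the driver later passes 0xd, which the PCIe
spec maps to the 1s-4s bucket, hence the "4 seconds" comment). The same update
can be expressed with the kernel's PCIe capability accessor; a sketch,
assuming a valid struct pci_dev:

	#include <linux/pci.h>

	/* Equivalent read-modify-write via the PCIe capability helper;
	 * a range of 0xd selects the 1s-4s completion timeout bucket.
	 */
	static void demo_set_comp_timeout(struct pci_dev *pdev, u16 range)
	{
		pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2,
						   PCI_EXP_DEVCTL2_COMP_TIMEOUT,
						   range);
	}
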
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
@@ -5075,9 +6051,30 @@ int t4_prep_adapter(struct adapter *adapter)
switch (ver) {
case CHELSIO_T4:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+ adapter->params.arch.sge_fl_db = DBPRIO_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
break;
case CHELSIO_T5:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
+ break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 256;
+ adapter->params.arch.nchan = 2;
+ adapter->params.arch.vfcount = 256;
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
@@ -5094,11 +6091,14 @@ int t4_prep_adapter(struct adapter *adapter)
adapter->params.nports = 1;
adapter->params.portvec = 1;
adapter->params.vpd.cclk = 50000;
+
+ /* Set pci completion timeout value to 4 seconds. */
+ set_pcie_completion_timeout(adapter, 0xd);
return 0;
}
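
The per-chip switch above replaces scattered is_t4()/is_t5() checks elsewhere
in the driver (see the t4_change_mac hunk earlier) with architecture
parameters filled once at probe time. A hypothetical distillation of the
pattern (struct and values are illustrative, not the real TCAM sizes):

	struct demo_arch_params {	/* mirrors arch_specific_params */
		unsigned int mps_tcam_size;
		unsigned int nchan;
	};

	static void demo_fill_arch(struct demo_arch_params *p, int chip_ver)
	{
		switch (chip_ver) {
		case 4:
			p->mps_tcam_size = 336;	/* illustrative */
			p->nchan = 4;
			break;
		case 5:
			p->mps_tcam_size = 512;	/* illustrative */
			p->nchan = 4;
			break;
		default:			/* T6 */
			p->mps_tcam_size = 512;	/* illustrative */
			p->nchan = 2;
			break;
		}
		/* hot paths then read p->mps_tcam_size instead of calling
		 * is_t4() on every use
		 */
	}
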
/**
- * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
* @qtype: the Ingress or Egress type for @qid
@@ -5122,7 +6122,7 @@ int t4_prep_adapter(struct adapter *adapter)
* Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
* then these "Inferred Queue ID" registers may not be used.
*/
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qid,
enum t4_bar2_qtype qtype,
u64 *pbar2_qoffset,
@@ -5154,7 +6154,7 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
* o The BAR2 Queue ID.
* o The BAR2 Queue ID Offset into the BAR2 page.
*/
- bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
bar2_qid = qid & qpp_mask;
bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
@@ -5223,18 +6223,19 @@ int t4_init_devlog_params(struct adapter *adap)
/* Otherwise, ask the firmware for its Device Log Parameters.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
- devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F);
- devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+ devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F);
+ devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
&devlog_cmd);
if (ret)
return ret;
- devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+ devlog_meminfo =
+ be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
- dparams->size = ntohl(devlog_cmd.memsize_devlog);
+ dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
return 0;
}
@@ -5255,13 +6256,13 @@ int t4_init_sge_params(struct adapter *adapter)
*/
hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
s_hps = (HOSTPAGESIZEPF0_S +
- (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
+ (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
/* Extract the SGE Egress and Ingress Queues Per Page for our PF.
*/
s_qpp = (QUEUESPERPAGEPF0_S +
- (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
+ (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
@@ -5292,12 +6293,19 @@ int t4_init_tp_params(struct adapter *adap)
/* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &adap->params.tp.vlan_pri_map, 1,
- TP_VLAN_PRI_MAP_A);
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &adap->params.tp.ingress_config, 1,
- TP_INGRESS_CONFIG_A);
+ if (adap->flags & FW_OK) {
+ t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A, 1);
+ t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG_A, 1);
+ } else {
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A);
+ t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+ &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG_A);
+ }
/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
@@ -5373,6 +6381,29 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
return field_shift;
}
+int t4_init_rss_mode(struct adapter *adap, int mbox)
+{
+ int i, ret;
+ struct fw_rss_vi_config_cmd rvc;
+
+ memset(&rvc, 0, sizeof(rvc));
+
+ for_each_port(adap, i) {
+ struct port_info *p = adap2pinfo(adap, i);
+
+ rvc.op_to_viid =
+ cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
+ rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
+ ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
+ if (ret)
+ return ret;
+ p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
+ }
+ return 0;
+}
+
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
u8 addr[6];
@@ -5390,10 +6421,10 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
while ((adap->params.portvec & (1 << j)) == 0)
j++;
- c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_PORT_CMD_PORTID_V(j));
- c.action_to_len16 = htonl(
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(j));
+ c.action_to_len16 = cpu_to_be32(
FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
FW_LEN16(c));
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
@@ -5411,22 +6442,23 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
adap->port[i]->dev_port = j;
- ret = ntohl(c.u.info.lstatus_to_modtype);
+ ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
FW_PORT_CMD_MDIOADDR_G(ret) : -1;
p->port_type = FW_PORT_CMD_PTYPE_G(ret);
p->mod_type = FW_PORT_MOD_TYPE_NA;
- rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
- rvc.retval_len16 = htonl(FW_LEN16(rvc));
+ rvc.op_to_viid =
+ cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
+ rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
if (ret)
return ret;
- p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
+ p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
- init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
+ init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
j++;
}
return 0;
@@ -5717,3 +6749,130 @@ void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
cfg | adap->params.tp.la_mask);
}
+
+/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
+ * seconds). If we find one of the SGE Ingress DMA State Machines in the same
+ * state for more than the Warning Threshold then we'll issue a warning about
+ * a potential hang. While the SGE Ingress DMA Channel still appears to be
+ * hung, we'll repeat the warning every Warning Repeat seconds until the
+ * situation clears. If it does clear, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH 1
+#define SGE_IDMA_WARN_REPEAT 300
+
+/**
+ * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
+ * @adapter: the adapter
+ * @idma: the adapter IDMA Monitor state
+ *
+ * Initialize the state of an SGE Ingress DMA Monitor.
+ */
+void t4_idma_monitor_init(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma)
+{
+ /* Initialize the state variables for detecting an SGE Ingress DMA
+ * hang. The SGE has internal counters which count up on each clock
+ * tick whenever the SGE finds its Ingress DMA State Engines in the
+ * same state they were on the previous clock tick. The clock used is
+ * the Core Clock so we have a limit on the maximum "time" they can
+ * record; typically a very small number of seconds. For instance,
+ * with a 600MHz Core Clock, we can only count up to a bit more than
+ * 7s. So we'll synthesize a larger counter in order to not run the
+ * risk of having the "timers" overflow and give us the flexibility to
+ * maintain a Hung SGE State Machine of our own which operates across
+ * a longer time frame.
+ */
+ idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
+ idma->idma_stalled[0] = 0;
+ idma->idma_stalled[1] = 0;
+}
+
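
The 1-second threshold comes straight from the Core Clock rate:
core_ticks_per_usec(adapter) ticks per microsecond times 10^6. The overflow
math in the comment checks out, too: a 32-bit counter at 600 MHz saturates
after 2^32 / 6e8, roughly 7.16 seconds. As a tiny sketch:

	/* ticks-per-second threshold at a given Core Clock (MHz); at
	 * 600 MHz a u32 same-state counter pegs out after ~7.16s
	 */
	static unsigned int demo_1s_thresh(unsigned int core_mhz)
	{
		return core_mhz * 1000000;  /* core_ticks_per_usec * 1e6 */
	}
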
+/**
+ * t4_idma_monitor - monitor SGE Ingress DMA state
+ * @adapter: the adapter
+ * @idma: the adapter IDMA Monitor state
+ * @hz: number of ticks/second
+ * @ticks: number of ticks since the last IDMA Monitor call
+ */
+void t4_idma_monitor(struct adapter *adapter,
+ struct sge_idma_monitor_state *idma,
+ int hz, int ticks)
+{
+ int i, idma_same_state_cnt[2];
+
+ /* Read the SGE Debug Ingress DMA Same State Count registers. These
+ * are counters inside the SGE which count up on each clock when the
+ * SGE finds its Ingress DMA State Engines in the same states they
+ * were in the previous clock. The counters will peg out at
+ * 0xffffffff without wrapping around, so once they pass the 1s
+ * threshold they'll stay above it until the IDMA state changes.
+ */
+ t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
+ idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
+ idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+
+ for (i = 0; i < 2; i++) {
+ u32 debug0, debug11;
+
+ /* If the Ingress DMA Same State Counter ("timer") is less
+ * than 1s, then we can reset our synthesized Stall Timer and
+ * continue. If we have previously emitted warnings about a
+ * potential stalled Ingress Queue, issue a note indicating
+ * that the Ingress Queue has resumed forward progress.
+ */
+ if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
+ if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
+ dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
+ "resumed after %d seconds\n",
+ i, idma->idma_qid[i],
+ idma->idma_stalled[i] / hz);
+ idma->idma_stalled[i] = 0;
+ continue;
+ }
+
+ /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+ * domain. The first time we get here it'll be because we
+ * passed the 1s Threshold; each additional time it'll be
+ * because the RX Timer Callback is being fired on its regular
+ * schedule.
+ *
+ * If the stall is below our Potential Hung Ingress Queue
+ * Warning Threshold, continue.
+ */
+ if (idma->idma_stalled[i] == 0) {
+ idma->idma_stalled[i] = hz;
+ idma->idma_warn[i] = 0;
+ } else {
+ idma->idma_stalled[i] += ticks;
+ idma->idma_warn[i] -= ticks;
+ }
+
+ if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
+ continue;
+
+ /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
+ */
+ if (idma->idma_warn[i] > 0)
+ continue;
+ idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
+
+ /* Read and save the SGE IDMA State and Queue ID information.
+ * We do this every time in case it changes across time ...
+ * can't be too careful ...
+ */
+ t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
+ debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+ idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+ t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
+ debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+ idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+ dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
+ "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
+ i, idma->idma_qid[i], idma->idma_state[i],
+ idma->idma_stalled[i] / hz,
+ debug0, debug11);
+ t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
+ }
+}
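
Stripped of the register reads, the per-channel accounting above is a small
state machine: reset the synthesized stall timer while the hardware counter is
under the 1s threshold, otherwise accumulate the elapsed ticks and re-arm a
warning budget every SGE_IDMA_WARN_REPEAT seconds. A hypothetical
distillation:

	#define DEMO_WARN_THRESH 1	/* seconds, like SGE_IDMA_WARN_THRESH */
	#define DEMO_WARN_REPEAT 300	/* seconds, like SGE_IDMA_WARN_REPEAT */

	/* hw_same_state is true while the hardware counter is past the 1s
	 * threshold; returns nonzero when a warning should be emitted
	 */
	static int demo_stall_tick(int hw_same_state, int hz, int ticks,
				   int *stalled, int *warn_budget)
	{
		if (!hw_same_state) {
			*stalled = 0;		/* forward progress resumed */
			return 0;
		}
		if (*stalled == 0) {		/* first tick past the 1s mark */
			*stalled = hz;
			*warn_budget = 0;
		} else {
			*stalled += ticks;
			*warn_budget -= ticks;
		}
		if (*stalled < DEMO_WARN_THRESH * hz || *warn_budget > 0)
			return 0;
		*warn_budget = DEMO_WARN_REPEAT * hz;
		return 1;			/* caller logs the stall */
	}
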
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 380b15c0417a..c8488f430d19 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -52,8 +52,6 @@ enum {
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
- NWOL_PAT = 8, /* # of WoL patterns */
- WOL_PAT_LEN = 128, /* length of WoL patterns */
};
enum {
@@ -61,6 +59,8 @@ enum {
CIM_NUM_OBQ = 6, /* # of CIM OBQs */
CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
+ CIM_PIFLA_SIZE = 64, /* # of 192-bit words in CIM PIF LA */
+ CIM_MALA_SIZE = 64, /* # of 160-bit words in CIM MA LA */
CIM_IBQ_SIZE = 128, /* # of 128-bit words in a CIM IBQ */
CIM_OBQ_SIZE = 128, /* # of 128-bit words in a CIM OBQ */
TPLA_SIZE = 128, /* # of 64-bit words in TP LA */
@@ -152,17 +152,33 @@ struct rsp_ctrl {
};
};
-#define RSPD_NEWBUF 0x80000000U
-#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
-#define RSPD_QID(x) RSPD_LEN(x)
+#define RSPD_NEWBUF_S 31
+#define RSPD_NEWBUF_V(x) ((x) << RSPD_NEWBUF_S)
+#define RSPD_NEWBUF_F RSPD_NEWBUF_V(1U)
-#define RSPD_GEN(x) ((x) >> 7)
-#define RSPD_TYPE(x) (((x) >> 4) & 3)
+#define RSPD_LEN_S 0
+#define RSPD_LEN_M 0x7fffffff
+#define RSPD_LEN_G(x) (((x) >> RSPD_LEN_S) & RSPD_LEN_M)
-#define V_QINTR_CNT_EN 0x0
-#define QINTR_CNT_EN 0x1
-#define QINTR_TIMER_IDX(x) ((x) << 1)
-#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
+#define RSPD_QID_S RSPD_LEN_S
+#define RSPD_QID_M RSPD_LEN_M
+#define RSPD_QID_G(x) RSPD_LEN_G(x)
+
+#define RSPD_GEN_S 7
+
+#define RSPD_TYPE_S 4
+#define RSPD_TYPE_M 0x3
+#define RSPD_TYPE_G(x) (((x) >> RSPD_TYPE_S) & RSPD_TYPE_M)
+
+/* Rx queue interrupt deferral fields: counter enable and timer index */
+#define QINTR_CNT_EN_S 0
+#define QINTR_CNT_EN_V(x) ((x) << QINTR_CNT_EN_S)
+#define QINTR_CNT_EN_F QINTR_CNT_EN_V(1U)
+
+#define QINTR_TIMER_IDX_S 1
+#define QINTR_TIMER_IDX_M 0x7
+#define QINTR_TIMER_IDX_V(x) ((x) << QINTR_TIMER_IDX_S)
+#define QINTR_TIMER_IDX_G(x) (((x) >> QINTR_TIMER_IDX_S) & QINTR_TIMER_IDX_M)
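
The RSPD_*/QINTR_* rewrite follows the driver-wide field-macro convention:
_S is the shift, _M the unshifted mask, _V(x) inserts a value, _G(x) extracts
one, and _F is the single-bit flag form. A round-trip sketch with a
hypothetical two-bit field:

	#define DEMO_FIELD_S	4
	#define DEMO_FIELD_M	0x3
	#define DEMO_FIELD_V(x)	((x) << DEMO_FIELD_S)
	#define DEMO_FIELD_G(x)	(((x) >> DEMO_FIELD_S) & DEMO_FIELD_M)

	/* DEMO_FIELD_G(DEMO_FIELD_V(v)) == v for any v within the mask,
	 * and _V values for disjoint fields can be OR'd together safely
	 */
	static unsigned int demo_roundtrip(unsigned int v)
	{
		return DEMO_FIELD_G(DEMO_FIELD_V(v & DEMO_FIELD_M));
	}
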
/*
* Flash layout.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 30a2f56e99c2..132cb8fc0bf7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -634,26 +634,9 @@ struct cpl_tid_release {
struct cpl_tx_pkt_core {
__be32 ctrl0;
-#define TXPKT_VF(x) ((x) << 0)
-#define TXPKT_PF(x) ((x) << 8)
-#define TXPKT_VF_VLD (1 << 11)
-#define TXPKT_OVLAN_IDX(x) ((x) << 12)
-#define TXPKT_INTF(x) ((x) << 16)
-#define TXPKT_INS_OVLAN (1 << 21)
-#define TXPKT_OPCODE(x) ((x) << 24)
__be16 pack;
__be16 len;
__be64 ctrl1;
-#define TXPKT_CSUM_END(x) ((x) << 12)
-#define TXPKT_CSUM_START(x) ((x) << 20)
-#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20)
-#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30)
-#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
-#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40)
-#define TXPKT_VLAN(x) ((u64)(x) << 44)
-#define TXPKT_VLAN_VLD (1ULL << 60)
-#define TXPKT_IPCSUM_DIS (1ULL << 62)
-#define TXPKT_L4CSUM_DIS (1ULL << 63)
};
struct cpl_tx_pkt {
@@ -663,16 +646,69 @@ struct cpl_tx_pkt {
#define cpl_tx_pkt_xt cpl_tx_pkt
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define TXPKT_VF_S 0
+#define TXPKT_VF_V(x) ((x) << TXPKT_VF_S)
+
+#define TXPKT_PF_S 8
+#define TXPKT_PF_V(x) ((x) << TXPKT_PF_S)
+
+#define TXPKT_VF_VLD_S 11
+#define TXPKT_VF_VLD_V(x) ((x) << TXPKT_VF_VLD_S)
+#define TXPKT_VF_VLD_F TXPKT_VF_VLD_V(1U)
+
+#define TXPKT_OVLAN_IDX_S 12
+#define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
+
+#define TXPKT_INTF_S 16
+#define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
+
+#define TXPKT_INS_OVLAN_S 21
+#define TXPKT_INS_OVLAN_V(x) ((x) << TXPKT_INS_OVLAN_S)
+#define TXPKT_INS_OVLAN_F TXPKT_INS_OVLAN_V(1U)
+
+#define TXPKT_OPCODE_S 24
+#define TXPKT_OPCODE_V(x) ((x) << TXPKT_OPCODE_S)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define TXPKT_CSUM_END_S 12
+#define TXPKT_CSUM_END_V(x) ((x) << TXPKT_CSUM_END_S)
+
+#define TXPKT_CSUM_START_S 20
+#define TXPKT_CSUM_START_V(x) ((x) << TXPKT_CSUM_START_S)
+
+#define TXPKT_IPHDR_LEN_S 20
+#define TXPKT_IPHDR_LEN_V(x) ((__u64)(x) << TXPKT_IPHDR_LEN_S)
+
+#define TXPKT_CSUM_LOC_S 30
+#define TXPKT_CSUM_LOC_V(x) ((__u64)(x) << TXPKT_CSUM_LOC_S)
+
+#define TXPKT_ETHHDR_LEN_S 34
+#define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S)
+
+#define T6_TXPKT_ETHHDR_LEN_S 32
+#define T6_TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << T6_TXPKT_ETHHDR_LEN_S)
+
+#define TXPKT_CSUM_TYPE_S 40
+#define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S)
+
+#define TXPKT_VLAN_S 44
+#define TXPKT_VLAN_V(x) ((__u64)(x) << TXPKT_VLAN_S)
+
+#define TXPKT_VLAN_VLD_S 60
+#define TXPKT_VLAN_VLD_V(x) ((__u64)(x) << TXPKT_VLAN_VLD_S)
+#define TXPKT_VLAN_VLD_F TXPKT_VLAN_VLD_V(1ULL)
+
+#define TXPKT_IPCSUM_DIS_S 62
+#define TXPKT_IPCSUM_DIS_V(x) ((__u64)(x) << TXPKT_IPCSUM_DIS_S)
+#define TXPKT_IPCSUM_DIS_F TXPKT_IPCSUM_DIS_V(1ULL)
+
+#define TXPKT_L4CSUM_DIS_S 63
+#define TXPKT_L4CSUM_DIS_V(x) ((__u64)(x) << TXPKT_L4CSUM_DIS_S)
+#define TXPKT_L4CSUM_DIS_F TXPKT_L4CSUM_DIS_V(1ULL)
+
struct cpl_tx_pkt_lso_core {
__be32 lso_ctrl;
-#define LSO_TCPHDR_LEN(x) ((x) << 0)
-#define LSO_IPHDR_LEN(x) ((x) << 4)
-#define LSO_ETHHDR_LEN(x) ((x) << 16)
-#define LSO_IPV6(x) ((x) << 20)
-#define LSO_LAST_SLICE (1 << 22)
-#define LSO_FIRST_SLICE (1 << 23)
-#define LSO_OPCODE(x) ((x) << 24)
-#define LSO_T5_XFER_SIZE(x) ((x) << 0)
__be16 ipid_ofst;
__be16 mss;
__be32 seqno_offset;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 326674b19983..375a825573b0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -418,6 +418,20 @@
#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
+#define SGE_ERROR_STATS_A 0x1100
+
+#define UNCAPTURED_ERROR_S 18
+#define UNCAPTURED_ERROR_V(x) ((x) << UNCAPTURED_ERROR_S)
+#define UNCAPTURED_ERROR_F UNCAPTURED_ERROR_V(1U)
+
+#define ERROR_QID_VALID_S 17
+#define ERROR_QID_VALID_V(x) ((x) << ERROR_QID_VALID_S)
+#define ERROR_QID_VALID_F ERROR_QID_VALID_V(1U)
+
+#define ERROR_QID_S 0
+#define ERROR_QID_M 0x1ffffU
+#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M)
+
#define HP_INT_THRESH_S 28
#define HP_INT_THRESH_M 0xfU
#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
@@ -448,8 +462,13 @@
#define SGE_STAT_MATCH_A 0x10e8
#define SGE_STAT_CFG_A 0x10ec
+#define STATMODE_S 2
+#define STATMODE_V(x) ((x) << STATMODE_S)
+
#define STATSOURCE_T5_S 9
+#define STATSOURCE_T5_M 0xfU
#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
+#define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M)
#define SGE_DBFIFO_STATUS2_A 0x1118
@@ -705,6 +724,10 @@
#define REGISTER_S 0
#define REGISTER_V(x) ((x) << REGISTER_S)
+#define T6_ENABLE_S 31
+#define T6_ENABLE_V(x) ((x) << T6_ENABLE_S)
+#define T6_ENABLE_F T6_ENABLE_V(1U)
+
#define PFNUM_S 0
#define PFNUM_V(x) ((x) << PFNUM_S)
@@ -1338,6 +1361,42 @@
#define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S)
#define FLMTXFLSTEMPTY_F FLMTXFLSTEMPTY_V(1U)
+#define TP_TX_ORATE_A 0x7ebc
+
+#define OFDRATE3_S 24
+#define OFDRATE3_M 0xffU
+#define OFDRATE3_G(x) (((x) >> OFDRATE3_S) & OFDRATE3_M)
+
+#define OFDRATE2_S 16
+#define OFDRATE2_M 0xffU
+#define OFDRATE2_G(x) (((x) >> OFDRATE2_S) & OFDRATE2_M)
+
+#define OFDRATE1_S 8
+#define OFDRATE1_M 0xffU
+#define OFDRATE1_G(x) (((x) >> OFDRATE1_S) & OFDRATE1_M)
+
+#define OFDRATE0_S 0
+#define OFDRATE0_M 0xffU
+#define OFDRATE0_G(x) (((x) >> OFDRATE0_S) & OFDRATE0_M)
+
+#define TP_TX_TRATE_A 0x7ed0
+
+#define TNLRATE3_S 24
+#define TNLRATE3_M 0xffU
+#define TNLRATE3_G(x) (((x) >> TNLRATE3_S) & TNLRATE3_M)
+
+#define TNLRATE2_S 16
+#define TNLRATE2_M 0xffU
+#define TNLRATE2_G(x) (((x) >> TNLRATE2_S) & TNLRATE2_M)
+
+#define TNLRATE1_S 8
+#define TNLRATE1_M 0xffU
+#define TNLRATE1_G(x) (((x) >> TNLRATE1_S) & TNLRATE1_M)
+
+#define TNLRATE0_S 0
+#define TNLRATE0_M 0xffU
+#define TNLRATE0_G(x) (((x) >> TNLRATE0_S) & TNLRATE0_M)
+
#define TP_VLAN_PRI_MAP_A 0x140
#define FRAGMENTATION_S 9
@@ -1399,6 +1458,8 @@
#define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U)
#define TP_MIB_MAC_IN_ERR_0_A 0x0
+#define TP_MIB_HDR_IN_ERR_0_A 0x4
+#define TP_MIB_TCP_IN_ERR_0_A 0x8
#define TP_MIB_TCP_OUT_RST_A 0xc
#define TP_MIB_TCP_IN_SEG_HI_A 0x10
#define TP_MIB_TCP_IN_SEG_LO_A 0x11
@@ -1407,11 +1468,19 @@
#define TP_MIB_TCP_RXT_SEG_HI_A 0x14
#define TP_MIB_TCP_RXT_SEG_LO_A 0x15
#define TP_MIB_TNL_CNG_DROP_0_A 0x18
+#define TP_MIB_OFD_CHN_DROP_0_A 0x1c
#define TP_MIB_TCP_V6IN_ERR_0_A 0x28
#define TP_MIB_TCP_V6OUT_RST_A 0x2c
#define TP_MIB_OFD_ARP_DROP_A 0x36
+#define TP_MIB_CPL_IN_REQ_0_A 0x38
+#define TP_MIB_CPL_OUT_RSP_0_A 0x3c
#define TP_MIB_TNL_DROP_0_A 0x44
+#define TP_MIB_FCOE_DDP_0_A 0x48
+#define TP_MIB_FCOE_DROP_0_A 0x4c
+#define TP_MIB_FCOE_BYTE_0_HI_A 0x50
#define TP_MIB_OFD_VLN_DROP_0_A 0x58
+#define TP_MIB_USM_PKTS_A 0x5c
+#define TP_MIB_RQE_DFR_PKT_A 0x64
#define ULP_TX_INT_CAUSE_A 0x8dcc
@@ -1572,6 +1641,7 @@
#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528
#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
@@ -2054,6 +2124,11 @@
#define VFLKPIDX_M 0xffU
#define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
+#define T6_VFWRADDR_S 8
+#define T6_VFWRADDR_M 0xffU
+#define T6_VFWRADDR_V(x) ((x) << T6_VFWRADDR_S)
+#define T6_VFWRADDR_G(x) (((x) >> T6_VFWRADDR_S) & T6_VFWRADDR_M)
+
#define TP_RSS_CONFIG_CNG_A 0x7e04
#define TP_RSS_SECRET_KEY0_A 0x40
#define TP_RSS_PF0_CONFIG_A 0x30
@@ -2175,7 +2250,28 @@
#define MPS_RX_PERR_INT_CAUSE_A 0x11074
#define MPS_CLS_TCAM_Y_L_A 0xf000
+#define MPS_CLS_TCAM_DATA0_A 0xf000
+#define MPS_CLS_TCAM_DATA1_A 0xf004
+
+#define DMACH_S 0
+#define DMACH_M 0xffffU
+#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
+
#define MPS_CLS_TCAM_X_L_A 0xf008
+#define MPS_CLS_TCAM_DATA2_CTL_A 0xf008
+
+#define CTLCMDTYPE_S 31
+#define CTLCMDTYPE_V(x) ((x) << CTLCMDTYPE_S)
+#define CTLCMDTYPE_F CTLCMDTYPE_V(1U)
+
+#define CTLTCAMSEL_S 25
+#define CTLTCAMSEL_V(x) ((x) << CTLTCAMSEL_S)
+
+#define CTLTCAMINDEX_S 17
+#define CTLTCAMINDEX_V(x) ((x) << CTLTCAMINDEX_S)
+
+#define CTLXYBITSEL_S 16
+#define CTLXYBITSEL_V(x) ((x) << CTLXYBITSEL_S)
#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
@@ -2184,6 +2280,45 @@
#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
#define MPS_CLS_SRAM_L_A 0xe000
+
+#define T6_MULTILISTEN0_S 26
+
+#define T6_SRAM_PRIO3_S 23
+#define T6_SRAM_PRIO3_M 0x7U
+#define T6_SRAM_PRIO3_G(x) (((x) >> T6_SRAM_PRIO3_S) & T6_SRAM_PRIO3_M)
+
+#define T6_SRAM_PRIO2_S 20
+#define T6_SRAM_PRIO2_M 0x7U
+#define T6_SRAM_PRIO2_G(x) (((x) >> T6_SRAM_PRIO2_S) & T6_SRAM_PRIO2_M)
+
+#define T6_SRAM_PRIO1_S 17
+#define T6_SRAM_PRIO1_M 0x7U
+#define T6_SRAM_PRIO1_G(x) (((x) >> T6_SRAM_PRIO1_S) & T6_SRAM_PRIO1_M)
+
+#define T6_SRAM_PRIO0_S 14
+#define T6_SRAM_PRIO0_M 0x7U
+#define T6_SRAM_PRIO0_G(x) (((x) >> T6_SRAM_PRIO0_S) & T6_SRAM_PRIO0_M)
+
+#define T6_SRAM_VLD_S 13
+#define T6_SRAM_VLD_V(x) ((x) << T6_SRAM_VLD_S)
+#define T6_SRAM_VLD_F T6_SRAM_VLD_V(1U)
+
+#define T6_REPLICATE_S 12
+#define T6_REPLICATE_V(x) ((x) << T6_REPLICATE_S)
+#define T6_REPLICATE_F T6_REPLICATE_V(1U)
+
+#define T6_PF_S 9
+#define T6_PF_M 0x7U
+#define T6_PF_G(x) (((x) >> T6_PF_S) & T6_PF_M)
+
+#define T6_VF_VALID_S 8
+#define T6_VF_VALID_V(x) ((x) << T6_VF_VALID_S)
+#define T6_VF_VALID_F T6_VF_VALID_V(1U)
+
+#define T6_VF_S 0
+#define T6_VF_M 0xffU
+#define T6_VF_G(x) (((x) >> T6_VF_S) & T6_VF_M)
+
#define MPS_CLS_SRAM_H_A 0xe004
#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
@@ -2433,6 +2568,8 @@
#define CIM_F CIM_V(1U)
#define MC1_S 31
+#define MC1_V(x) ((x) << MC1_S)
+#define MC1_F MC1_V(1U)
#define PL_INT_ENABLE_A 0x19410
#define PL_INT_MAP0_A 0x19414
@@ -2463,6 +2600,18 @@
#define REV_V(x) ((x) << REV_S)
#define REV_G(x) (((x) >> REV_S) & REV_M)
+#define T6_UNKNOWNCMD_S 3
+#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
+#define T6_UNKNOWNCMD_F T6_UNKNOWNCMD_V(1U)
+
+#define T6_LIP0_S 2
+#define T6_LIP0_V(x) ((x) << T6_LIP0_S)
+#define T6_LIP0_F T6_LIP0_V(1U)
+
+#define T6_LIPMISS_S 1
+#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
+#define T6_LIPMISS_F T6_LIPMISS_V(1U)
+
#define LE_DB_INT_CAUSE_A 0x19c3c
#define REQQPARERR_S 16
@@ -2485,6 +2634,14 @@
#define LIP0_V(x) ((x) << LIP0_S)
#define LIP0_F LIP0_V(1U)
+#define TCAMINTPERR_S 13
+#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
+#define TCAMINTPERR_F TCAMINTPERR_V(1U)
+
+#define SSRAMINTPERR_S 10
+#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S)
+#define SSRAMINTPERR_F SSRAMINTPERR_V(1U)
+
#define NCSI_INT_CAUSE_A 0x1a0d8
#define CIM_DM_PRTY_ERR_S 8
@@ -2638,6 +2795,33 @@
#define CIM_IBQ_DBG_DATA_A 0x7b68
#define CIM_OBQ_DBG_DATA_A 0x7b6c
+#define CIM_DEBUGCFG_A 0x7b70
+#define CIM_DEBUGSTS_A 0x7b74
+
+#define POLADBGRDPTR_S 23
+#define POLADBGRDPTR_M 0x1ffU
+#define POLADBGRDPTR_V(x) ((x) << POLADBGRDPTR_S)
+
+#define POLADBGWRPTR_S 16
+#define POLADBGWRPTR_M 0x1ffU
+#define POLADBGWRPTR_G(x) (((x) >> POLADBGWRPTR_S) & POLADBGWRPTR_M)
+
+#define PILADBGRDPTR_S 14
+#define PILADBGRDPTR_M 0x1ffU
+#define PILADBGRDPTR_V(x) ((x) << PILADBGRDPTR_S)
+
+#define PILADBGWRPTR_S 0
+#define PILADBGWRPTR_M 0x1ffU
+#define PILADBGWRPTR_G(x) (((x) >> PILADBGWRPTR_S) & PILADBGWRPTR_M)
+
+#define LADBGEN_S 12
+#define LADBGEN_V(x) ((x) << LADBGEN_S)
+#define LADBGEN_F LADBGEN_V(1U)
+
+#define CIM_PO_LA_DEBUGDATA_A 0x7b78
+#define CIM_PI_LA_DEBUGDATA_A 0x7b7c
+#define CIM_PO_LA_MADEBUGDATA_A 0x7b80
+#define CIM_PI_LA_MADEBUGDATA_A 0x7b84
#define UPDBGLARDEN_S 1
#define UPDBGLARDEN_V(x) ((x) << UPDBGLARDEN_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index 19b2dcf6acde..7bdee3bf75ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -61,6 +61,30 @@
#define SGE_TIMERREGS 6
#define TIMERREG_COUNTER0_X 0
+#define FETCHBURSTMIN_64B_X 2
+
+#define FETCHBURSTMAX_256B_X 2
+#define FETCHBURSTMAX_512B_X 3
+
+#define HOSTFCMODE_STATUS_PAGE_X 2
+
+#define CIDXFLUSHTHRESH_32_X 5
+
+#define UPDATEDELIVERY_INTERRUPT_X 1
+
+#define RSPD_TYPE_FLBUF_X 0
+#define RSPD_TYPE_CPL_X 1
+#define RSPD_TYPE_INTR_X 2
+
+/* Congestion Manager Definitions.
+ */
+#define CONMCTXT_CNGTPMODE_S 19
+#define CONMCTXT_CNGTPMODE_V(x) ((x) << CONMCTXT_CNGTPMODE_S)
+#define CONMCTXT_CNGCHMAP_S 0
+#define CONMCTXT_CNGCHMAP_V(x) ((x) << CONMCTXT_CNGCHMAP_S)
+#define CONMCTXT_CNGTPMODE_CHANNEL_X 2
+#define CONMCTXT_CNGTPMODE_QUEUE_X 1
+
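
The new CONMCTXT_* fields compose a Congestion Manager context word: CNGTPMODE
selects channel- or queue-based congestion tracking, and CNGCHMAP, assuming it
is a per-channel bitmap, names the channels involved. A hedged composition
sketch (the variable is local to this example):

	/* channel-mode congestion tracking across channels 0 and 1;
	 * treating CNGCHMAP as a per-channel bitmap is an assumption
	 */
	u32 conm = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X) |
		   CONMCTXT_CNGCHMAP_V((1 << 0) | (1 << 1));
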
/* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
* The User Doorbells are each 128 bytes in length with a Simple Doorbell at
* offsets 8x and a Write Combining single 64-byte Egress Queue Unit
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 03fbfd1fb3df..ab4674684acc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -772,7 +772,7 @@ struct fw_ldst_cmd {
} addrval;
struct fw_ldst_idctxt {
__be32 physid;
- __be32 msg_pkd;
+ __be32 msg_ctxtflush;
__be32 ctxt_data7;
__be32 ctxt_data6;
__be32 ctxt_data5;
@@ -788,15 +788,27 @@ struct fw_ldst_cmd {
__be16 vctl;
__be16 rval;
} mdio;
- struct fw_ldst_mps {
- __be16 fid_ctl;
- __be16 rplcpf_pkd;
- __be32 rplc127_96;
- __be32 rplc95_64;
- __be32 rplc63_32;
- __be32 rplc31_0;
- __be32 atrb;
- __be16 vlan[16];
+ union fw_ldst_mps {
+ struct fw_ldst_mps_rplc {
+ __be16 fid_idx;
+ __be16 rplcpf_pkd;
+ __be32 rplc255_224;
+ __be32 rplc223_192;
+ __be32 rplc191_160;
+ __be32 rplc159_128;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ } rplc;
+ struct fw_ldst_mps_atrb {
+ __be16 fid_mpsid;
+ __be16 r2[3];
+ __be32 r3[2];
+ __be32 r4;
+ __be32 atrb;
+ __be16 vlan[16];
+ } atrb;
} mps;
struct fw_ldst_func {
u8 access_ctl;
@@ -822,6 +834,10 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_MSG_S 31
#define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S)
+#define FW_LDST_CMD_CTXTFLUSH_S 30
+#define FW_LDST_CMD_CTXTFLUSH_V(x) ((x) << FW_LDST_CMD_CTXTFLUSH_S)
+#define FW_LDST_CMD_CTXTFLUSH_F FW_LDST_CMD_CTXTFLUSH_V(1U)
+
#define FW_LDST_CMD_PADDR_S 8
#define FW_LDST_CMD_PADDR_V(x) ((x) << FW_LDST_CMD_PADDR_S)
@@ -831,8 +847,8 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_FID_S 15
#define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S)
-#define FW_LDST_CMD_CTL_S 0
-#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S)
+#define FW_LDST_CMD_IDX_S 0
+#define FW_LDST_CMD_IDX_V(x) ((x) << FW_LDST_CMD_IDX_S)
#define FW_LDST_CMD_RPLCPF_S 0
#define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S)
@@ -1061,6 +1077,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
FW_PARAMS_PARAM_DEV_CF = 0x0D,
+ FW_PARAMS_PARAM_DEV_PHYFW = 0x0F,
FW_PARAMS_PARAM_DEV_DIAG = 0x11,
FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
@@ -1123,6 +1140,12 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
+ FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
+};
+
+enum fw_params_param_dev_phyfw {
+ FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD = 0x00,
+ FW_PARAMS_PARAM_DEV_PHYFW_VERSION = 0x01,
};
enum fw_params_param_dev_diag {
@@ -1377,6 +1400,7 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_IQFLINTCONGEN_S 27
#define FW_IQ_CMD_IQFLINTCONGEN_V(x) ((x) << FW_IQ_CMD_IQFLINTCONGEN_S)
+#define FW_IQ_CMD_IQFLINTCONGEN_F FW_IQ_CMD_IQFLINTCONGEN_V(1U)
#define FW_IQ_CMD_IQFLINTISCSIC_S 26
#define FW_IQ_CMD_IQFLINTISCSIC_V(x) ((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
@@ -1399,6 +1423,7 @@ struct fw_iq_cmd {
#define FW_IQ_CMD_FL0CONGCIF_S 11
#define FW_IQ_CMD_FL0CONGCIF_V(x) ((x) << FW_IQ_CMD_FL0CONGCIF_S)
+#define FW_IQ_CMD_FL0CONGCIF_F FW_IQ_CMD_FL0CONGCIF_V(1U)
#define FW_IQ_CMD_FL0ONCHIP_S 10
#define FW_IQ_CMD_FL0ONCHIP_V(x) ((x) << FW_IQ_CMD_FL0ONCHIP_S)
@@ -1589,6 +1614,7 @@ struct fw_eq_eth_cmd {
#define FW_EQ_ETH_CMD_FETCHRO_S 22
#define FW_EQ_ETH_CMD_FETCHRO_V(x) ((x) << FW_EQ_ETH_CMD_FETCHRO_S)
+#define FW_EQ_ETH_CMD_FETCHRO_F FW_EQ_ETH_CMD_FETCHRO_V(1U)
#define FW_EQ_ETH_CMD_HOSTFCMODE_S 20
#define FW_EQ_ETH_CMD_HOSTFCMODE_V(x) ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S)
@@ -2526,13 +2552,8 @@ enum fw_port_mod_sub_type {
FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
};
-/* port stats */
-#define FW_NUM_PORT_STATS 50
-#define FW_NUM_PORT_TX_STATS 23
-#define FW_NUM_PORT_RX_STATS 27
-
enum fw_port_stats_tx_index {
- FW_STAT_TX_PORT_BYTES_IX,
+ FW_STAT_TX_PORT_BYTES_IX = 0,
FW_STAT_TX_PORT_FRAMES_IX,
FW_STAT_TX_PORT_BCAST_IX,
FW_STAT_TX_PORT_MCAST_IX,
@@ -2554,11 +2575,12 @@ enum fw_port_stats_tx_index {
FW_STAT_TX_PORT_PPP4_IX,
FW_STAT_TX_PORT_PPP5_IX,
FW_STAT_TX_PORT_PPP6_IX,
- FW_STAT_TX_PORT_PPP7_IX
+ FW_STAT_TX_PORT_PPP7_IX,
+ FW_NUM_PORT_TX_STATS
};
enum fw_port_stat_rx_index {
- FW_STAT_RX_PORT_BYTES_IX,
+ FW_STAT_RX_PORT_BYTES_IX = 0,
FW_STAT_RX_PORT_FRAMES_IX,
FW_STAT_RX_PORT_BCAST_IX,
FW_STAT_RX_PORT_MCAST_IX,
@@ -2584,9 +2606,14 @@ enum fw_port_stat_rx_index {
FW_STAT_RX_PORT_PPP5_IX,
FW_STAT_RX_PORT_PPP6_IX,
FW_STAT_RX_PORT_PPP7_IX,
- FW_STAT_RX_PORT_LESS_64B_IX
+ FW_STAT_RX_PORT_LESS_64B_IX,
+ FW_STAT_RX_PORT_MAC_ERROR_IX,
+ FW_NUM_PORT_RX_STATS
};
+/* port stats */
+#define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + FW_NUM_PORT_RX_STATS)
+
struct fw_port_stats_cmd {
__be32 op_to_portid;
__be32 retval_len16;
@@ -3015,7 +3042,8 @@ struct fw_hdr {
enum fw_hdr_chip {
FW_HDR_CHIP_T4,
- FW_HDR_CHIP_T5
+ FW_HDR_CHIP_T5,
+ FW_HDR_CHIP_T6
};
#define FW_HDR_FW_VER_MAJOR_S 24
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index b9d1cbac0eee..32b213559b02 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -45,4 +45,9 @@
#define T5FW_VERSION_MICRO 0x20
#define T5FW_VERSION_BUILD 0x00
+#define T6FW_VERSION_MAJOR 0x01
+#define T6FW_VERSION_MINOR 0x0D
+#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_BUILD 0x00
+
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 1d893b0b7ddf..b2b5e5bbe04c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1021,7 +1021,7 @@ static int closest_thres(const struct sge *s, int thres)
static unsigned int qtimer_val(const struct adapter *adapter,
const struct sge_rspq *rspq)
{
- unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
+ unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
return timer_idx < SGE_NTIMERS
? adapter->sge.timer_val[timer_idx]
@@ -1086,8 +1086,8 @@ static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
* Update the response queue's interrupt coalescing parameters and
* return success.
*/
- rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
- (cnt > 0 ? QINTR_CNT_EN : 0));
+ rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+ QINTR_CNT_EN_V(cnt > 0));
return 0;
}
@@ -1439,7 +1439,7 @@ static int cxgb4vf_get_coalesce(struct net_device *dev,
coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
coalesce->rx_max_coalesced_frames =
- ((rspq->intr_params & QINTR_CNT_EN)
+ ((rspq->intr_params & QINTR_CNT_EN_F)
? adapter->sge.counter_val[rspq->pktcnt_idx]
: 0);
return 0;
@@ -2393,8 +2393,9 @@ static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
u8 pkt_cnt_idx, unsigned int size,
unsigned int iqe_size)
{
- rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
- (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
+ rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+ (pkt_cnt_idx < SGE_NCOUNTERS ?
+ QINTR_CNT_EN_F : 0));
rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
? pkt_cnt_idx
: 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 482f6de6817d..ad53e5ad2acd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -524,7 +524,7 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
*/
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
- u32 val;
+ u32 val = adapter->params.arch.sge_fl_db;
/* The SGE keeps track of its Producer and Consumer Indices in terms
* of Egress Queue Units so we can only tell it about integral numbers
@@ -532,11 +532,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
if (is_t4(adapter->params.chip))
- val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
+ val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
else
- val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
- DBTYPE_F;
- val |= DBPRIO_F;
+ val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
/* Make sure all memory writes to the Free List queue are
* committed before we tell the hardware about them.
@@ -1084,7 +1082,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
*/
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
const struct iphdr *iph = ip_hdr(skb);
@@ -1100,7 +1098,7 @@ nocsum:
* unknown protocol, disable HW csum
* and hope a bad packet is detected
*/
- return TXPKT_L4CSUM_DIS;
+ return TXPKT_L4CSUM_DIS_F;
}
} else {
/*
@@ -1116,16 +1114,21 @@ nocsum:
goto nocsum;
}
- if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
- else {
+ if (likely(csum_type >= TX_CSUM_TCPIP)) {
+ u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+ int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+ if (chip <= CHELSIO_T5)
+ hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ else
+ hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+ } else {
int start = skb_transport_offset(skb);
- return TXPKT_CSUM_TYPE(csum_type) |
- TXPKT_CSUM_START(start) |
- TXPKT_CSUM_LOC(start + skb->csum_offset);
+ return TXPKT_CSUM_TYPE_V(csum_type) |
+ TXPKT_CSUM_START_V(start) |
+ TXPKT_CSUM_LOC_V(start + skb->csum_offset);
}
}
@@ -1160,7 +1163,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 wr_mid;
u64 cntrl, *end;
- int qidx, credits;
+ int qidx, credits, max_pkt_len;
unsigned int flits, ndesc;
struct adapter *adapter;
struct sge_eth_txq *txq;
@@ -1183,6 +1186,13 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb->len < fw_hdr_copy_len))
goto out_free;
+ /* Discard the packet if the length is greater than mtu */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+ if (skb_vlan_tag_present(skb))
+ max_pkt_len += VLAN_HLEN;
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ goto out_free;
+
/*
* Figure out which TX Queue we're going to use.
*/
@@ -1281,29 +1291,35 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* Fill in the LSO CPL message.
*/
lso->lso_ctrl =
- cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE |
- LSO_LAST_SLICE |
- LSO_IPV6(v6) |
- LSO_ETHHDR_LEN(eth_xtra_len/4) |
- LSO_IPHDR_LEN(l3hdr_len/4) |
- LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+ cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F |
+ LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
lso->ipid_ofst = cpu_to_be16(0);
lso->mss = cpu_to_be16(ssi->gso_size);
lso->seqno_offset = cpu_to_be32(0);
if (is_t4(adapter->params.chip))
lso->len = cpu_to_be32(skb->len);
else
- lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len));
+ lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
/*
* Set up TX Packet CPL pointer, control word and perform
* accounting.
*/
cpl = (void *)(lso + 1);
- cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN(l3hdr_len) |
- TXPKT_ETHHDR_LEN(eth_xtra_len));
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
txq->tso++;
txq->tx_cso += ssi->gso_segs;
} else {
@@ -1320,10 +1336,11 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+ cntrl = hwcsum(adapter->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
txq->tx_cso++;
} else
- cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
}
/*
@@ -1332,15 +1349,15 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (skb_vlan_tag_present(skb)) {
txq->vlan_ins++;
- cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
}
/*
* Fill in the TX Packet CPL message header.
*/
- cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
- TXPKT_INTF(pi->port_id) |
- TXPKT_PF(0));
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->port_id) |
+ TXPKT_PF_V(0));
cpl->pack = cpu_to_be16(0);
cpl->len = cpu_to_be16(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1663,7 +1680,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
static inline bool is_new_response(const struct rsp_ctrl *rc,
const struct sge_rspq *rspq)
{
- return RSPD_GEN(rc->type_gen) == rspq->gen;
+ return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
}
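
is_new_response() above is the classic generation-bit test for a
hardware-producer ring: the SGE flips the descriptor's gen bit on every wrap
of the ring, so an entry is fresh exactly while its gen bit matches the
queue's current generation. Distilled (RSPD_GEN_S is 7 per the t4_hw.h hunk;
the struct is hypothetical):

	struct demo_rspq {
		unsigned int gen;	/* flipped each time the ring wraps */
	};

	static int demo_is_new(unsigned char type_gen,
			       const struct demo_rspq *q)
	{
		return ((type_gen >> 7) & 0x1) == q->gen;
	}
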
/**
@@ -1752,8 +1769,8 @@ static int process_responses(struct sge_rspq *rspq, int budget)
* SGE.
*/
dma_rmb();
- rsp_type = RSPD_TYPE(rc->type_gen);
- if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+ rsp_type = RSPD_TYPE_G(rc->type_gen);
+ if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
struct page_frag *fp;
struct pkt_gl gl;
const struct rx_sw_desc *sdesc;
@@ -1764,7 +1781,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
* If we get a "new buffer" message from the SGE we
* need to move on to the next Free List buffer.
*/
- if (len & RSPD_NEWBUF) {
+ if (len & RSPD_NEWBUF_F) {
/*
* We get one "new buffer" message when we
* first start up a queue so we need to ignore
@@ -1775,7 +1792,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
1);
rspq->offset = 0;
}
- len = RSPD_LEN(len);
+ len = RSPD_LEN_G(len);
}
gl.tot_len = len;
@@ -1818,10 +1835,10 @@ static int process_responses(struct sge_rspq *rspq, int budget)
rspq->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&gl, &rxq->fl, frag);
- } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+ } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
ret = rspq->handler(rspq, rspq->cur_desc, NULL);
} else {
- WARN_ON(rsp_type > RSP_TYPE_CPL);
+ WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
ret = 0;
}
@@ -1833,7 +1850,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
*/
const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
rspq->next_intr_params =
- QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
+ QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
break;
}
@@ -1875,7 +1892,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
intr_params = rspq->next_intr_params;
rspq->next_intr_params = rspq->intr_params;
} else
- intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
+ intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
if (unlikely(work_done == 0))
rspq->unhandled_irqs++;
@@ -1936,10 +1953,10 @@ static unsigned int process_intrq(struct adapter *adapter)
* never happen ...
*/
dma_rmb();
- if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
+ if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
dev_err(adapter->pdev_dev,
"Unexpected INTRQ response type %d\n",
- RSPD_TYPE(rc->type_gen));
+ RSPD_TYPE_G(rc->type_gen));
continue;
}
@@ -1951,7 +1968,7 @@ static unsigned int process_intrq(struct adapter *adapter)
* want to either make them fatal and/or conditionalized under
* DEBUG.
*/
- qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
+ qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
iq_idx = IQ_IDX(s, qid);
if (unlikely(iq_idx >= MAX_INGQ)) {
dev_err(adapter->pdev_dev,
@@ -2154,8 +2171,8 @@ static void __iomem *bar2_address(struct adapter *adapter,
u64 bar2_qoffset;
int ret;
- ret = t4_bar2_sge_qregs(adapter, qid, qtype,
- &bar2_qoffset, pbar2_qid);
+ ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
+ &bar2_qoffset, pbar2_qid);
if (ret)
return NULL;
@@ -2239,12 +2256,18 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
if (fl) {
+ enum chip_type chip =
+ CHELSIO_CHIP_VERSION(adapter->params.chip);
/*
* Allocate the ring for the hardware free list (with space
* for its status page) along with the associated software
* descriptor ring. The free list size needs to be a multiple
- * of the Egress Queue Unit.
+ * of the Egress Queue Unit and at least 2 Egress Units larger
+ * than the SGE's Egress Congestion Threshold
+ * (fl_starve_thres - 1).
*/
+ if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
+ fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
sizeof(__be64), sizeof(struct rx_sw_desc),
@@ -2274,7 +2297,9 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
- FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
+ FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+ FETCHBURSTMAX_512B_X :
+ FETCHBURSTMAX_256B_X));
cmd.fl0size = cpu_to_be16(flsz);
cmd.fl0addr = cpu_to_be64(fl->addr);
}
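
The new clamp in t4vf_sge_alloc_rxq() guarantees the free list is at least two
Egress Queue Units larger than the SGE's egress congestion threshold
(fl_starve_thres - 1) before rounding up to whole units; without that headroom
a freshly allocated queue could sit permanently below the starvation
watermark. The arithmetic, as a hypothetical helper:

	static unsigned int demo_fl_size(unsigned int requested,
					 unsigned int fl_starve_thres,
					 unsigned int fl_per_eq_unit)
	{
		unsigned int min_size =
			fl_starve_thres - 1 + 2 * fl_per_eq_unit;

		if (requested < min_size)
			requested = min_size;
		/* whole units only, like roundup(..., FL_PER_EQ_UNIT) */
		return ((requested + fl_per_eq_unit - 1) / fl_per_eq_unit) *
			fl_per_eq_unit;
	}
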
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index b9debb4f29a3..88b8981b4751 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -51,6 +51,7 @@
*/
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
enum chip_type {
T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -156,6 +157,12 @@ struct vpd_params {
u32 cclk; /* Core Clock (KHz) */
};
+/* Stores chip specific parameters */
+struct arch_specific_params {
+ u32 sge_fl_db;
+ u16 mps_tcam_size;
+};
+
/*
* Global Receive Side Scaling (RSS) parameters in host-native format.
*/
@@ -215,6 +222,7 @@ struct adapter_params {
struct vpd_params vpd; /* Vital Product Data */
struct rss_params rss; /* Receive Side Scaling */
struct vf_resources vfres; /* Virtual Function Resource limits */
+ struct arch_specific_params arch; /* chip specific params */
enum chip_type chip; /* chip code */
u8 nports; /* # of Ethernet "ports" */
};
@@ -284,11 +292,11 @@ int t4vf_fw_reset(struct adapter *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int t4_bar2_sge_qregs(struct adapter *adapter,
- unsigned int qid,
- enum t4_bar2_qtype qtype,
- u64 *pbar2_qoffset,
- unsigned int *pbar2_qid);
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
int t4vf_get_sge_params(struct adapter *);
int t4vf_get_vpd_params(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 966ee900ed00..0db6dc9e9ed2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -428,7 +428,7 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
}
/**
- * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
* @qtype: the Ingress or Egress type for @qid
@@ -452,11 +452,11 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
* Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
* then these "Inferred Queue ID" registers may not be used.
*/
-int t4_bar2_sge_qregs(struct adapter *adapter,
- unsigned int qid,
- enum t4_bar2_qtype qtype,
- u64 *pbar2_qoffset,
- unsigned int *pbar2_qid)
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
{
unsigned int page_shift, page_size, qpp_shift, qpp_mask;
u64 bar2_page_offset, bar2_qoffset;
@@ -1191,9 +1191,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned nfilters = 0;
unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
- unsigned int max_naddr = is_t4(adapter->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
if (naddr > max_naddr)
return -EINVAL;
@@ -1285,9 +1283,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
struct fw_vi_mac_exact *p = &cmd.u.exact[0];
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[1]), 16);
- unsigned int max_naddr = is_t4(adapter->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
/*
* If this is a new allocation, determine whether it should be
@@ -1310,7 +1306,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
if (ret == 0) {
p = &rpl.u.exact[0];
ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
- if (ret >= max_naddr)
+ if (ret >= max_mac_addr)
ret = -ENOMEM;
}
return ret;
@@ -1590,11 +1586,25 @@ int t4vf_prep_adapter(struct adapter *adapter)
switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
case CHELSIO_T4:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+ adapter->params.arch.sge_fl_db = DBPRIO_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_CLS_SRAM_L_INSTANCES;
break;
case CHELSIO_T5:
chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
+ adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+
+ case CHELSIO_T6:
+ chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
break;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 0be6850be8a2..d106186f4f4a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -5,7 +5,7 @@
#include <linux/in.h>
#include <linux/types.h>
#include <linux/skbuff.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
#include "enic_res.h"
#include "enic_clsf.h"
@@ -15,14 +15,14 @@
* @rq: rq number to steer to
*
* This function returns filter_id(hardware_id) of the filter
- * added. In case of error it returns an negative number.
+ * added. In case of error it returns a negative number.
*/
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
{
int res;
struct filter data;
- switch (keys->ip_proto) {
+ switch (keys->basic.ip_proto) {
case IPPROTO_TCP:
data.u.ipv4.protocol = PROTO_TCP;
break;
@@ -33,10 +33,10 @@ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
return -EPROTONOSUPPORT;
};
data.type = FILTER_IPV4_5TUPLE;
- data.u.ipv4.src_addr = ntohl(keys->src);
- data.u.ipv4.dst_addr = ntohl(keys->dst);
- data.u.ipv4.src_port = ntohs(keys->port16[0]);
- data.u.ipv4.dst_port = ntohs(keys->port16[1]);
+ data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
+ data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
+ data.u.ipv4.src_port = ntohs(keys->ports.src);
+ data.u.ipv4.dst_port = ntohs(keys->ports.dst);
data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
spin_lock_bh(&enic->devcmd_lock);
@@ -158,11 +158,11 @@ static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
struct enic_rfs_fltr_node *tpos;
hlist_for_each_entry(tpos, h, node)
- if (tpos->keys.src == k->src &&
- tpos->keys.dst == k->dst &&
- tpos->keys.ports == k->ports &&
- tpos->keys.ip_proto == k->ip_proto &&
- tpos->keys.n_proto == k->n_proto)
+ if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
+ tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
+ tpos->keys.ports.ports == k->ports.ports &&
+ tpos->keys.basic.ip_proto == k->basic.ip_proto &&
+ tpos->keys.basic.n_proto == k->basic.n_proto)
return tpos;
return NULL;
}
@@ -177,9 +177,10 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
int res, i;
enic = netdev_priv(dev);
- res = skb_flow_dissect(skb, &keys);
- if (!res || keys.n_proto != htons(ETH_P_IP) ||
- (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
+ res = skb_flow_dissect_flow_keys(skb, &keys);
+ if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
+ (keys.basic.ip_proto != IPPROTO_TCP &&
+ keys.basic.ip_proto != IPPROTO_UDP))
return -EPROTONOSUPPORT;
tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
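
For reference, the converted call sites above read the dissector output from nested sub-structs rather than the old flat flow_keys fields. A minimal consumer sketch, assuming the two-argument skb_flow_dissect_flow_keys() form used above:

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/flow_dissector.h>

/* Sketch: classify an IPv4 skb using the nested flow_keys layout. */
static int classify_v4(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys))
		return -EINVAL;
	if (keys.basic.n_proto != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	/* addresses and ports now live in keys.addrs and keys.ports */
	pr_debug("%pI4:%u -> %pI4:%u proto %u\n",
		 &keys.addrs.v4addrs.src, ntohs(keys.ports.src),
		 &keys.addrs.v4addrs.dst, ntohs(keys.ports.dst),
		 keys.basic.ip_proto);
	return 0;
}
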
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 68d47b196dae..f3f1601a76f3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -348,7 +348,7 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
n = htbl_fltr_search(enic, (u16)fsp->location);
if (!n)
return -EINVAL;
- switch (n->keys.ip_proto) {
+ switch (n->keys.basic.ip_proto) {
case IPPROTO_TCP:
fsp->flow_type = TCP_V4_FLOW;
break;
@@ -360,16 +360,16 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
break;
}
- fsp->h_u.tcp_ip4_spec.ip4src = n->keys.src;
+ fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
- fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.dst;
+ fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
- fsp->h_u.tcp_ip4_spec.psrc = n->keys.port16[0];
+ fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
- fsp->h_u.tcp_ip4_spec.pdst = n->keys.port16[1];
+ fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
fsp->ring_cookie = n->rq_id;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index badff181e719..8966f3159bb2 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -5189,16 +5189,16 @@ de4x5_parse_params(struct net_device *dev)
if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
- if (strstr(p, "TP")) {
- lp->params.autosense = TP;
- } else if (strstr(p, "TP_NW")) {
+ if (strstr(p, "TP_NW")) {
lp->params.autosense = TP_NW;
+ } else if (strstr(p, "TP")) {
+ lp->params.autosense = TP;
+ } else if (strstr(p, "BNC_AUI")) {
+ lp->params.autosense = BNC;
} else if (strstr(p, "BNC")) {
lp->params.autosense = BNC;
} else if (strstr(p, "AUI")) {
lp->params.autosense = AUI;
- } else if (strstr(p, "BNC_AUI")) {
- lp->params.autosense = BNC;
} else if (strstr(p, "10Mb")) {
lp->params.autosense = _10Mb;
} else if (strstr(p, "100Mb")) {
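
The reordering above matters because strstr() matches substrings: "TP" is found inside "TP_NW" and "BNC" inside "BNC_AUI", so the specific token has to be tested before the generic one. A two-assert illustration:

#include <assert.h>
#include <string.h>

int main(void)
{
	/* Testing "TP" first would shadow the "TP_NW" case entirely. */
	assert(strstr("autosense=TP_NW", "TP") != NULL);
	assert(strstr("autosense=BNC_AUI", "BNC") != NULL);
	return 0;
}
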
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 2c30c0c83f98..447d09272ab7 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1115,7 +1115,7 @@ static void uli526x_timer(unsigned long data)
netif_carrier_off(dev);
}
}
- db->init=0;
+ db->init = 0;
/* Timer active again */
db->timer.expires = ULI526X_TIMER_WUT;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1274b6fdac8a..cf0a5fcdaaaf 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -463,10 +463,8 @@ rio_open (struct net_device *dev)
dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
}
- init_timer (&np->timer);
+ setup_timer(&np->timer, rio_timer, (unsigned long)dev);
np->timer.expires = jiffies + 1*HZ;
- np->timer.data = (unsigned long) dev;
- np->timer.function = rio_timer;
add_timer (&np->timer);
/* Start Tx/Rx */
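
setup_timer() folds the open-coded init_timer()/function/data triple into a single call. A minimal sketch using the timer API of this kernel era:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void demo_timeout(unsigned long data) { /* timer callback */ }

static struct timer_list demo_timer;

static void demo_arm(void *ctx)
{
	/* Equivalent to: init_timer(&demo_timer);
	 *                demo_timer.function = demo_timeout;
	 *                demo_timer.data = (unsigned long)ctx;
	 */
	setup_timer(&demo_timer, demo_timeout, (unsigned long)ctx);
	mod_timer(&demo_timer, jiffies + HZ);	/* fire in ~1 second */
}
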
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index ea94a8eb6b35..7108563260ae 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -5,6 +5,15 @@ config BE2NET
This driver implements the NIC functionality for ServerEngines'
10Gbps network adapter - BladeEngine.
+config BE2NET_HWMON
+ bool "HWMON support for be2net driver"
+ depends on BE2NET && HWMON
+ depends on !(BE2NET=y && HWMON=m)
+ default y
+ ---help---
+ Say Y here if you want to expose thermal sensor data on the
+ be2net network adapter.
+
config BE2NET_VXLAN
bool "VXLAN offload support on be2net driver"
default y
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 1bf1cdce74ac..8d12b41b3b19 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -31,11 +31,13 @@
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>
#include <linux/cpumask.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "10.6.0.1"
+#define DRV_VER "10.6.0.2"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -314,7 +316,6 @@ struct be_rx_obj {
} ____cacheline_aligned_in_smp;
struct be_drv_stats {
- u32 be_on_die_temperature;
u32 eth_red_drops;
u32 dma_map_errors;
u32 rx_drops_no_pbuf;
@@ -366,6 +367,7 @@ struct be_vf_cfg {
u32 tx_rate;
u32 plink_tracking;
u32 privileges;
+ bool spoofchk;
};
enum vf_state {
@@ -382,6 +384,7 @@ enum vf_state {
#define BE_FLAGS_SETUP_DONE BIT(9)
#define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10)
#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11)
+#define BE_FLAGS_OS2BMC BIT(12)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
@@ -426,6 +429,8 @@ struct be_resources {
u32 vf_if_cap_flags; /* VF if capability flags */
};
+#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
+
struct rss_info {
u64 rss_flags;
u8 rsstable[RSS_INDIR_TABLE_LEN];
@@ -433,6 +438,12 @@ struct rss_info {
u8 rss_hkey[RSS_HASH_KEY_LEN];
};
+#define BE_INVALID_DIE_TEMP 0xFF
+struct be_hwmon {
+ struct device *hwmon_dev;
+ u8 be_on_die_temp; /* Unit: millidegree Celsius */
+};
+
/* Macros to read/write the 'features' word of be_wrb_params structure.
*/
#define BE_WRB_F_BIT(name) BE_WRB_F_##name##_BIT
@@ -453,7 +464,8 @@ enum {
BE_WRB_F_LSO_BIT, /* LSO */
BE_WRB_F_LSO6_BIT, /* LSO6 */
BE_WRB_F_VLAN_BIT, /* VLAN */
- BE_WRB_F_VLAN_SKIP_HW_BIT /* Skip VLAN tag (workaround) */
+ BE_WRB_F_VLAN_SKIP_HW_BIT, /* Skip VLAN tag (workaround) */
+ BE_WRB_F_OS2BMC_BIT /* Send packet to the management ring */
};
/* The structure below provides a HW-agnostic abstraction of WRB params
@@ -514,6 +526,7 @@ struct be_adapter {
u16 work_counter;
struct delayed_work be_err_detection_work;
+ u8 err_flags;
u32 flags;
u32 cmd_privileges;
/* Ethtool knobs and info */
@@ -572,8 +585,11 @@ struct be_adapter {
u16 qnq_vid;
u32 msg_enable;
int be_get_temp_freq;
+ struct be_hwmon hwmon_info;
u8 pf_number;
struct rss_info rss_info;
+ /* Filters for packets that need to be sent to BMC */
+ u32 bmc_filt_mask;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -772,26 +788,36 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb)
return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}
-static inline bool be_multi_rxq(const struct be_adapter *adapter)
+#define BE_ERROR_EEH 1
+#define BE_ERROR_UE BIT(1)
+#define BE_ERROR_FW BIT(2)
+#define BE_ERROR_HW (BE_ERROR_EEH | BE_ERROR_UE)
+#define BE_ERROR_ANY (BE_ERROR_EEH | BE_ERROR_UE | BE_ERROR_FW)
+#define BE_CLEAR_ALL 0xFF
+
+static inline u8 be_check_error(struct be_adapter *adapter, u32 err_type)
{
- return adapter->num_rx_qs > 1;
+ return (adapter->err_flags & err_type);
}
-static inline bool be_error(struct be_adapter *adapter)
+static inline void be_set_error(struct be_adapter *adapter, int err_type)
{
- return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->err_flags |= err_type;
+ netif_carrier_off(netdev);
+
+ dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
}
-static inline bool be_hw_error(struct be_adapter *adapter)
+static inline void be_clear_error(struct be_adapter *adapter, int err_type)
{
- return adapter->eeh_error || adapter->hw_error;
+ adapter->err_flags &= ~err_type;
}
-static inline void be_clear_all_error(struct be_adapter *adapter)
+static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
- adapter->eeh_error = false;
- adapter->hw_error = false;
- adapter->fw_timeout = false;
+ return adapter->num_rx_qs > 1;
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
@@ -804,6 +830,7 @@ bool be_pause_supported(struct be_adapter *adapter);
u32 be_get_fw_log_level(struct be_adapter *adapter);
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);
+void be_eqd_update(struct be_adapter *adapter, bool force_update);
/*
* internal function to initialize-cleanup roce device.
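
The be.h rework above folds three independent booleans (eeh_error, hw_error, fw_timeout) into one err_flags bitmask with check/set/clear helpers, so callers can test any combination in a single read. A reduced sketch of the pattern with hypothetical names:

#include <linux/bitops.h>

#define ERR_EEH		BIT(0)
#define ERR_UE		BIT(1)
#define ERR_FW		BIT(2)
#define ERR_ANY		(ERR_EEH | ERR_UE | ERR_FW)
#define ERR_CLEAR_ALL	0xFF

struct dev_state { u8 err_flags; };

static inline bool dev_check_error(struct dev_state *s, u32 mask)
{
	return s->err_flags & mask;	/* any requested bit set? */
}

static inline void dev_set_error(struct dev_state *s, u32 mask)
{
	s->err_flags |= mask;
}

static inline void dev_clear_error(struct dev_state *s, u32 mask)
{
	s->err_flags &= ~mask;		/* pass ERR_CLEAR_ALL to reset */
}
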
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index c5e1d0ac75f9..9eac3227d2ca 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -93,7 +93,7 @@ static void be_mcc_notify(struct be_adapter *adapter)
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
- if (be_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_ANY))
return;
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
@@ -140,6 +140,7 @@ static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
if (base_status == MCC_STATUS_NOT_SUPPORTED ||
base_status == MCC_STATUS_ILLEGAL_REQUEST ||
addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
+ addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
(opcode == OPCODE_COMMON_WRITE_FLASHROM &&
(base_status == MCC_STATUS_ILLEGAL_FIELD ||
addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
@@ -191,10 +192,12 @@ static void be_async_cmd_process(struct be_adapter *adapter,
if (base_status == MCC_STATUS_SUCCESS) {
struct be_cmd_resp_get_cntl_addnl_attribs *resp =
(void *)resp_hdr;
- adapter->drv_stats.be_on_die_temperature =
+ adapter->hwmon_info.be_on_die_temp =
resp->on_die_temperature;
} else {
adapter->be_get_temp_freq = 0;
+ adapter->hwmon_info.be_on_die_temp =
+ BE_INVALID_DIE_TEMP;
}
return;
}
@@ -330,6 +333,21 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
}
}
+#define MGMT_ENABLE_MASK 0x4
+static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
+ u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);
+
+ if (evt_dw1 & MGMT_ENABLE_MASK) {
+ adapter->flags |= BE_FLAGS_OS2BMC;
+ adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
+ } else {
+ adapter->flags &= ~BE_FLAGS_OS2BMC;
+ }
+}
+
static void be_async_grp5_evt_process(struct be_adapter *adapter,
struct be_mcc_compl *compl)
{
@@ -346,6 +364,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
case ASYNC_EVENT_PVID_STATE:
be_async_grp5_pvid_state_process(adapter, compl);
break;
+ /* Async event to disable/enable os2bmc and/or mac-learning */
+ case ASYNC_EVENT_FW_CONTROL:
+ be_async_grp5_fw_control_process(adapter, compl);
+ break;
default:
break;
}
@@ -486,7 +508,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
for (i = 0; i < mcc_timeout; i++) {
- if (be_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_ANY))
return -EIO;
local_bh_disable();
@@ -499,7 +521,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
- adapter->fw_timeout = true;
+ be_set_error(adapter, BE_ERROR_FW);
return -EIO;
}
return status;
@@ -538,7 +560,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
u32 ready;
do {
- if (be_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_ANY))
return -EIO;
ready = ioread32(db);
@@ -551,7 +573,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
- adapter->fw_timeout = true;
+ be_set_error(adapter, BE_ERROR_FW);
be_detect_error(adapter);
return -1;
}
@@ -1457,7 +1479,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
*if_handle = le32_to_cpu(resp->interface_id);
/* Hack to retrieve VF's pmac-id on BE3 */
- if (BE3_chip(adapter) && !be_physfn(adapter))
+ if (BE3_chip(adapter) && be_virtfn(adapter))
adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
}
return status;
@@ -3156,7 +3178,7 @@ int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
}
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id, u16 hsw_mode)
+ u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_hsw_config *req;
@@ -3192,6 +3214,14 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
ctxt, hsw_mode);
}
+ /* Enable/disable both mac and vlan spoof checking */
+ if (!BEx_chip(adapter) && spoofchk) {
+ AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
+ ctxt, spoofchk);
+ AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
+ ctxt, spoofchk);
+ }
+
be_dws_cpu_to_le(req->context, sizeof(req->context));
status = be_mcc_notify_wait(adapter);
@@ -3202,7 +3232,7 @@ err:
/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id, u8 *mode)
+ u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_hsw_config *req;
@@ -3250,6 +3280,10 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
if (mode)
*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
port_fwd_type, &resp->context);
+ if (spoofchk)
+ *spoofchk =
+ AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+ spoofchk, &resp->context);
}
err:
@@ -3261,7 +3295,7 @@ static bool be_is_wol_excluded(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- if (!be_physfn(adapter))
+ if (be_virtfn(adapter))
return true;
switch (pdev->subsystem_device) {
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 1ec22300e254..2716e6f30d9a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -65,7 +65,8 @@ enum mcc_base_status {
enum mcc_addl_status {
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
- MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
+ MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a,
+ MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab
};
#define CQE_BASE_STATUS_MASK 0xFFFF
@@ -104,6 +105,7 @@ struct be_mcc_compl {
#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
#define ASYNC_EVENT_CODE_SLIPORT 0x11
#define ASYNC_EVENT_PORT_MISCONFIG 0x9
+#define ASYNC_EVENT_FW_CONTROL 0x5
enum {
LINK_DOWN = 0x0,
@@ -180,6 +182,22 @@ struct be_async_event_misconfig_port {
u32 flags;
} __packed;
+#define BMC_FILT_BROADCAST_ARP BIT(0)
+#define BMC_FILT_BROADCAST_DHCP_CLIENT BIT(1)
+#define BMC_FILT_BROADCAST_DHCP_SERVER BIT(2)
+#define BMC_FILT_BROADCAST_NET_BIOS BIT(3)
+#define BMC_FILT_BROADCAST BIT(7)
+#define BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER BIT(8)
+#define BMC_FILT_MULTICAST_IPV6_RA BIT(9)
+#define BMC_FILT_MULTICAST_IPV6_RAS BIT(10)
+#define BMC_FILT_MULTICAST BIT(15)
+struct be_async_fw_control {
+ u32 event_data_word1;
+ u32 event_data_word2;
+ u32 evt_tag;
+ u32 event_data_word4;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;
@@ -1109,10 +1127,6 @@ struct be_cmd_req_query_fw_cfg {
u32 rsvd[31];
};
-/* ASIC revisions */
-#define ASIC_REV_B0 0x10
-#define ASIC_REV_P2 0x11
-
struct be_cmd_resp_query_fw_cfg {
struct be_cmd_resp_hdr hdr;
u32 be_config_number;
@@ -1745,18 +1759,24 @@ struct be_cmd_req_set_mac_list {
#define PORT_FWD_TYPE_VEPA 0x3
#define PORT_FWD_TYPE_VEB 0x2
+#define ENABLE_MAC_SPOOFCHK 0x2
+#define DISABLE_MAC_SPOOFCHK 0x3
+
struct amap_set_hsw_context {
u8 interface_id[16];
- u8 rsvd0[14];
+ u8 rsvd0[8];
+ u8 mac_spoofchk[2];
+ u8 rsvd1[4];
u8 pvid_valid;
u8 pport;
- u8 rsvd1[6];
+ u8 rsvd2[6];
u8 port_fwd_type[3];
- u8 rsvd2[7];
+ u8 rsvd3[5];
+ u8 vlan_spoofchk[2];
u8 pvid[16];
- u8 rsvd3[32];
u8 rsvd4[32];
u8 rsvd5[32];
+ u8 rsvd6[32];
} __packed;
struct be_cmd_req_set_hsw_config {
@@ -1774,11 +1794,13 @@ struct amap_get_hsw_req_context {
struct amap_get_hsw_resp_context {
u8 rsvd0[6];
u8 port_fwd_type[3];
- u8 rsvd1[7];
+ u8 rsvd1[5];
+ u8 spoofchk;
+ u8 rsvd2;
u8 pvid[16];
- u8 rsvd2[32];
u8 rsvd3[32];
u8 rsvd4[32];
+ u8 rsvd5[32];
} __packed;
struct be_cmd_req_get_hsw_config {
@@ -2334,9 +2356,9 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
u32 domain);
int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
- u16 intf_id, u16 hsw_mode);
+ u16 intf_id, u16 hsw_mode, u8 spoofchk);
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
- u16 intf_id, u8 *mode);
+ u16 intf_id, u8 *mode, bool *spoofchk);
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level);
int be_cmd_get_fw_log_level(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 2835dee5dc39..b2476dbfd103 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -123,7 +123,6 @@ static const struct be_ethtool_stat et_stats[] = {
{DRVSTAT_INFO(dma_map_errors)},
/* Number of packets dropped due to random early drop function */
{DRVSTAT_INFO(eth_red_drops)},
- {DRVSTAT_INFO(be_on_die_temperature)},
{DRVSTAT_INFO(rx_roce_bytes_lsd)},
{DRVSTAT_INFO(rx_roce_bytes_msd)},
{DRVSTAT_INFO(rx_roce_frames)},
@@ -368,6 +367,14 @@ static int be_set_coalesce(struct net_device *netdev,
aic++;
}
+ /* For Skyhawk, the EQD setting happens via EQ_DB when AIC is enabled.
+ * When AIC is disabled, persistently force-set the EQD value via the
+ * FW cmd, so that we don't have to calculate the delay multiplier
+ * encoding value each time EQ_DB is rung.
+ */
+ if (!et->use_adaptive_rx_coalesce && skyhawk_chip(adapter))
+ be_eqd_update(adapter, true);
+
return 0;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 48840889db62..c684bb32b487 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -132,6 +132,18 @@
#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
#define DB_EQ_REARM_SHIFT (29) /* bit 29 */
+/* Rearm to interrupt delay encoding */
+#define DB_EQ_R2I_DLY_SHIFT (30) /* bits 30 - 31 */
+
+/* The rearm-to-interrupt (R2I) delay multiplier encoding selects one of 3
+ * values configured in the CEV_REARM2IRPT_DLY_MULT_CSR register. The host
+ * driver programs this value while ringing an EQ doorbell (EQ_DB) if a
+ * delay between rearming the EQ and the next interrupt on this EQ is desired.
+ */
+#define R2I_DLY_ENC_0 0 /* No delay */
+#define R2I_DLY_ENC_1 1 /* maps to 160us EQ delay */
+#define R2I_DLY_ENC_2 2 /* maps to 96us EQ delay */
+#define R2I_DLY_ENC_3 3 /* maps to 48us EQ delay */
/********* Compl Q door bell *************/
#define DB_CQ_OFFSET 0x120
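
When the driver rings EQ_DB with one of these encodings, the two-bit value is OR'd into the doorbell word at DB_EQ_R2I_DLY_SHIFT (as the be_eq_notify() change later in this patch does). A condensed sketch of the doorbell composition, reusing the defines above:

/* Sketch: build an EQ doorbell value carrying an R2I delay encoding.
 * r2i_enc is one of R2I_DLY_ENC_0..3 selecting a delay preloaded in
 * CEV_REARM2IRPT_DLY_MULT_CSR.
 */
static u32 eq_db_value(u16 qid, u16 num_popped, u32 r2i_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= 1 << DB_EQ_REARM_SHIFT;		/* rearm the EQ */
	val |= r2i_enc << DB_EQ_R2I_DLY_SHIFT;
	return val;
}
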
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index e43cc8a73ea7..c0f34845cf59 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -179,7 +179,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
if (lancer_chip(adapter))
return;
- if (adapter->eeh_error)
+ if (be_check_error(adapter, BE_ERROR_EEH))
return;
status = be_cmd_intr_set(adapter, enable);
@@ -191,6 +191,9 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
u32 val = 0;
+ if (be_check_error(adapter, BE_ERROR_HW))
+ return;
+
val |= qid & DB_RQ_RING_ID_MASK;
val |= posted << DB_RQ_NUM_POSTED_SHIFT;
@@ -203,6 +206,9 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
{
u32 val = 0;
+ if (be_check_error(adapter, BE_ERROR_HW))
+ return;
+
val |= txo->q.id & DB_TXULP_RING_ID_MASK;
val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
@@ -211,14 +217,15 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
- bool arm, bool clear_int, u16 num_popped)
+ bool arm, bool clear_int, u16 num_popped,
+ u32 eq_delay_mult_enc)
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_error)
+ if (be_check_error(adapter, BE_ERROR_HW))
return;
if (arm)
@@ -227,6 +234,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
val |= 1 << DB_EQ_CLR_SHIFT;
val |= 1 << DB_EQ_EVNT_SHIFT;
val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
+ val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
@@ -238,7 +246,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
DB_CQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_error)
+ if (be_check_error(adapter, BE_ERROR_HW))
return;
if (arm)
@@ -662,6 +670,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
netif_carrier_on(netdev);
else
netif_carrier_off(netdev);
+
+ netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@ -810,6 +820,8 @@ static void wrb_fill_hdr(struct be_adapter *adapter,
SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
+ SET_TX_WRB_HDR_BITS(mgmt, hdr,
+ BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -1146,6 +1158,130 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
txo->pend_wrb_cnt = 0;
}
+/* OS2BMC related */
+
+#define DHCP_CLIENT_PORT 68
+#define DHCP_SERVER_PORT 67
+#define NET_BIOS_PORT1 137
+#define NET_BIOS_PORT2 138
+#define DHCPV6_RAS_PORT 547
+
+#define is_mc_allowed_on_bmc(adapter, eh) \
+ (!is_multicast_filt_enabled(adapter) && \
+ is_multicast_ether_addr(eh->h_dest) && \
+ !is_broadcast_ether_addr(eh->h_dest))
+
+#define is_bc_allowed_on_bmc(adapter, eh) \
+ (!is_broadcast_filt_enabled(adapter) && \
+ is_broadcast_ether_addr(eh->h_dest))
+
+#define is_arp_allowed_on_bmc(adapter, skb) \
+ (is_arp(skb) && is_arp_filt_enabled(adapter))
+
+#define is_broadcast_packet(eh, adapter) \
+ (is_multicast_ether_addr(eh->h_dest) && \
+ !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
+
+#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
+
+#define is_arp_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
+
+#define is_dhcp_client_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
+
+#define is_dhcp_srvr_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
+
+#define is_nbios_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
+
+#define is_ipv6_na_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & \
+ BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
+
+#define is_ipv6_ra_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
+
+#define is_ipv6_ras_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
+
+#define is_broadcast_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
+
+#define is_multicast_filt_enabled(adapter) \
+ (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
+
+static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
+ struct sk_buff **skb)
+{
+ struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
+ bool os2bmc = false;
+
+ if (!be_is_os2bmc_enabled(adapter))
+ goto done;
+
+ if (!is_multicast_ether_addr(eh->h_dest))
+ goto done;
+
+ if (is_mc_allowed_on_bmc(adapter, eh) ||
+ is_bc_allowed_on_bmc(adapter, eh) ||
+ is_arp_allowed_on_bmc(adapter, (*skb))) {
+ os2bmc = true;
+ goto done;
+ }
+
+ if ((*skb)->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *hdr = ipv6_hdr((*skb));
+ u8 nexthdr = hdr->nexthdr;
+
+ if (nexthdr == IPPROTO_ICMPV6) {
+ struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
+
+ switch (icmp6->icmp6_type) {
+ case NDISC_ROUTER_ADVERTISEMENT:
+ os2bmc = is_ipv6_ra_filt_enabled(adapter);
+ goto done;
+ case NDISC_NEIGHBOUR_ADVERTISEMENT:
+ os2bmc = is_ipv6_na_filt_enabled(adapter);
+ goto done;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (is_udp_pkt((*skb))) {
+ struct udphdr *udp = udp_hdr((*skb));
+
+ switch (udp->dest) {
+ case DHCP_CLIENT_PORT:
+ os2bmc = is_dhcp_client_filt_enabled(adapter);
+ goto done;
+ case DHCP_SERVER_PORT:
+ os2bmc = is_dhcp_srvr_filt_enabled(adapter);
+ goto done;
+ case NET_BIOS_PORT1:
+ case NET_BIOS_PORT2:
+ os2bmc = is_nbios_filt_enabled(adapter);
+ goto done;
+ case DHCPV6_RAS_PORT:
+ os2bmc = is_ipv6_ras_filt_enabled(adapter);
+ goto done;
+ default:
+ break;
+ }
+ }
+done:
+ /* For packets over a VLAN that are destined to the BMC,
+ * the ASIC expects the VLAN tag to be inline in the packet.
+ */
+ if (os2bmc)
+ *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+
+ return os2bmc;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1167,6 +1303,18 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
goto drop;
}
+ /* If os2bmc is enabled and the pkt is destined to the BMC,
+ * enqueue the pkt a 2nd time with the mgmt bit set.
+ */
+ if (be_send_pkt_to_bmc(adapter, &skb)) {
+ BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
+ wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
+ if (unlikely(!wrb_cnt))
+ goto drop;
+ else
+ skb_get(skb);
+ }
+
if (be_is_txq_full(txo)) {
netif_stop_subqueue(netdev, q_idx);
tx_stats(txo)->tx_stops++;
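
The double enqueue above sends a BMC-bound frame once to the wire and a second time with the mgmt bit set, with skb_get() taking an extra reference so both WRBs can complete independently. For the UDP branch of the classifier, a standalone sketch of port-based matching that converts the big-endian header field before comparing — the port names here are illustrative:

#include <linux/skbuff.h>
#include <linux/udp.h>

#define SKETCH_DHCP_CLIENT_PORT	68
#define SKETCH_DHCP_SERVER_PORT	67

/* Sketch: does this UDP frame belong to a BMC-filtered flow? */
static bool is_mgmt_udp(const struct sk_buff *skb)
{
	const struct udphdr *udp = udp_hdr(skb);

	switch (ntohs(udp->dest)) {	/* wire format is big-endian */
	case SKETCH_DHCP_CLIENT_PORT:
	case SKETCH_DHCP_SERVER_PORT:
		return true;
	default:
		return false;
	}
}
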
@@ -1265,7 +1413,8 @@ static int be_vid_config(struct be_adapter *adapter)
if (status) {
dev_err(dev, "Setting HW VLAN filtering failed\n");
/* Set to VLAN promisc mode as setting VLAN filter failed */
- if (addl_status(status) ==
+ if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
+ addl_status(status) ==
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
return be_set_vlan_promisc(adapter);
} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
@@ -1466,6 +1615,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
+ vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
return 0;
}
@@ -1478,7 +1628,7 @@ static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
int status;
/* Enable Transparent VLAN Tagging */
- status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
if (status)
return status;
@@ -1507,7 +1657,7 @@ static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
/* Reset Transparent VLAN Tagging. */
status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
- vf_cfg->if_handle, 0);
+ vf_cfg->if_handle, 0, 0);
if (status)
return status;
@@ -1642,6 +1792,39 @@ static int be_set_vf_link_state(struct net_device *netdev, int vf,
return 0;
}
+static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+ u8 spoofchk;
+ int status;
+
+ if (!sriov_enabled(adapter))
+ return -EPERM;
+
+ if (vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ if (BEx_chip(adapter))
+ return -EOPNOTSUPP;
+
+ if (enable == vf_cfg->spoofchk)
+ return 0;
+
+ spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
+
+ status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
+ 0, spoofchk);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Spoofchk change on VF %d failed: %#x\n", vf, status);
+ return be_cmd_status(status);
+ }
+
+ vf_cfg->spoofchk = enable;
+ return 0;
+}
+
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
ulong now)
{
@@ -1650,61 +1833,110 @@ static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
aic->jiffies = now;
}
-static void be_eqd_update(struct be_adapter *adapter)
+static int be_get_new_eqd(struct be_eq_obj *eqo)
{
- struct be_set_eqd set_eqd[MAX_EVT_QS];
- int eqd, i, num = 0, start;
+ struct be_adapter *adapter = eqo->adapter;
+ int eqd, start;
struct be_aic_obj *aic;
- struct be_eq_obj *eqo;
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
- u64 rx_pkts, tx_pkts;
+ u64 rx_pkts = 0, tx_pkts = 0;
ulong now;
u32 pps, delta;
+ int i;
- for_all_evt_queues(adapter, eqo, i) {
- aic = &adapter->aic_obj[eqo->idx];
- if (!aic->enable) {
- if (aic->jiffies)
- aic->jiffies = 0;
- eqd = aic->et_eqd;
- goto modify_eqd;
- }
+ aic = &adapter->aic_obj[eqo->idx];
+ if (!aic->enable) {
+ if (aic->jiffies)
+ aic->jiffies = 0;
+ eqd = aic->et_eqd;
+ return eqd;
+ }
- rxo = &adapter->rx_obj[eqo->idx];
+ for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
do {
start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
- rx_pkts = rxo->stats.rx_pkts;
+ rx_pkts += rxo->stats.rx_pkts;
} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
+ }
- txo = &adapter->tx_obj[eqo->idx];
+ for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
do {
start = u64_stats_fetch_begin_irq(&txo->stats.sync);
- tx_pkts = txo->stats.tx_reqs;
+ tx_pkts += txo->stats.tx_reqs;
} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
+ }
- /* Skip, if wrapped around or first calculation */
- now = jiffies;
- if (!aic->jiffies || time_before(now, aic->jiffies) ||
- rx_pkts < aic->rx_pkts_prev ||
- tx_pkts < aic->tx_reqs_prev) {
- be_aic_update(aic, rx_pkts, tx_pkts, now);
- continue;
- }
+ /* Skip, if wrapped around or first calculation */
+ now = jiffies;
+ if (!aic->jiffies || time_before(now, aic->jiffies) ||
+ rx_pkts < aic->rx_pkts_prev ||
+ tx_pkts < aic->tx_reqs_prev) {
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
+ return aic->prev_eqd;
+ }
- delta = jiffies_to_msecs(now - aic->jiffies);
- pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
- (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
- eqd = (pps / 15000) << 2;
+ delta = jiffies_to_msecs(now - aic->jiffies);
+ if (delta == 0)
+ return aic->prev_eqd;
- if (eqd < 8)
- eqd = 0;
- eqd = min_t(u32, eqd, aic->max_eqd);
- eqd = max_t(u32, eqd, aic->min_eqd);
+ pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+ (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+ eqd = (pps / 15000) << 2;
- be_aic_update(aic, rx_pkts, tx_pkts, now);
-modify_eqd:
- if (eqd != aic->prev_eqd) {
+ if (eqd < 8)
+ eqd = 0;
+ eqd = min_t(u32, eqd, aic->max_eqd);
+ eqd = max_t(u32, eqd, aic->min_eqd);
+
+ be_aic_update(aic, rx_pkts, tx_pkts, now);
+
+ return eqd;
+}
+
+/* For Skyhawk-R only */
+static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
+{
+ struct be_adapter *adapter = eqo->adapter;
+ struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
+ ulong now = jiffies;
+ int eqd;
+ u32 mult_enc;
+
+ if (!aic->enable)
+ return 0;
+
+ if (time_before_eq(now, aic->jiffies) ||
+ jiffies_to_msecs(now - aic->jiffies) < 1)
+ eqd = aic->prev_eqd;
+ else
+ eqd = be_get_new_eqd(eqo);
+
+ if (eqd > 100)
+ mult_enc = R2I_DLY_ENC_1;
+ else if (eqd > 60)
+ mult_enc = R2I_DLY_ENC_2;
+ else if (eqd > 20)
+ mult_enc = R2I_DLY_ENC_3;
+ else
+ mult_enc = R2I_DLY_ENC_0;
+
+ aic->prev_eqd = eqd;
+
+ return mult_enc;
+}
+
+void be_eqd_update(struct be_adapter *adapter, bool force_update)
+{
+ struct be_set_eqd set_eqd[MAX_EVT_QS];
+ struct be_aic_obj *aic;
+ struct be_eq_obj *eqo;
+ int i, num = 0, eqd;
+
+ for_all_evt_queues(adapter, eqo, i) {
+ aic = &adapter->aic_obj[eqo->idx];
+ eqd = be_get_new_eqd(eqo);
+ if (force_update || eqd != aic->prev_eqd) {
set_eqd[num].delay_multiplier = (eqd * 65)/100;
set_eqd[num].eq_id = eqo->q.id;
aic->prev_eqd = eqd;
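
be_get_new_eqd() above turns the aggregate packet rate into a delay with eqd = (pps / 15000) << 2, floored to zero below 8 and clamped to the per-EQ min/max from AIC. The same curve as a standalone helper:

#include <linux/kernel.h>

/* Sketch: map packets-per-second to an EQ delay, as computed above. */
static u32 pps_to_eqd(u32 pps, u32 min_eqd, u32 max_eqd)
{
	u32 eqd = (pps / 15000) << 2;	/* ~4 units per 15k pps */

	if (eqd < 8)			/* light traffic: no delay */
		eqd = 0;
	return clamp_t(u32, eqd, min_eqd, max_eqd);
}
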
@@ -2212,7 +2444,7 @@ static void be_eq_clean(struct be_eq_obj *eqo)
{
int num = events_get(eqo);
- be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}
static void be_rx_cq_clean(struct be_rx_obj *rxo)
@@ -2236,7 +2468,9 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
if (lancer_chip(adapter))
break;
- if (flush_wait++ > 10 || be_hw_error(adapter)) {
+ if (flush_wait++ > 50 ||
+ be_check_error(adapter,
+ BE_ERROR_HW)) {
dev_warn(&adapter->pdev->dev,
"did not receive flush compl\n");
break;
@@ -2297,7 +2531,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
pending_txqs--;
}
- if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
+ if (pending_txqs == 0 || ++timeo > 10 ||
+ be_check_error(adapter, BE_ERROR_HW))
break;
mdelay(1);
@@ -2573,7 +2808,7 @@ static irqreturn_t be_intx(int irq, void *dev)
if (num_evts)
eqo->spurious_intr = 0;
}
- be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
+ be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
/* Return IRQ_HANDLED only for the first spurious intr
* after a valid intr to stop the kernel from branding
@@ -2589,7 +2824,7 @@ static irqreturn_t be_msix(int irq, void *dev)
{
struct be_eq_obj *eqo = dev;
- be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
napi_schedule(&eqo->napi);
return IRQ_HANDLED;
}
@@ -2838,6 +3073,7 @@ int be_poll(struct napi_struct *napi, int budget)
int max_work = 0, work, i, num_evts;
struct be_rx_obj *rxo;
struct be_tx_obj *txo;
+ u32 mult_enc = 0;
num_evts = events_get(eqo);
@@ -2863,10 +3099,18 @@ int be_poll(struct napi_struct *napi, int budget)
if (max_work < budget) {
napi_complete(napi);
- be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
+
+ /* Skyhawk EQ_DB has a provision to set the rearm-to-interrupt
+ * delay via a delay multiplier encoding value.
+ */
+ if (skyhawk_chip(adapter))
+ mult_enc = be_get_eq_delay_mult_enc(eqo);
+
+ be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
+ mult_enc);
} else {
/* As we'll continue in polling mode, count and clear events */
- be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
+ be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
}
return max_work;
}
@@ -2898,22 +3142,19 @@ void be_detect_error(struct be_adapter *adapter)
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
- bool error_detected = false;
struct device *dev = &adapter->pdev->dev;
- struct net_device *netdev = adapter->netdev;
- if (be_hw_error(adapter))
+ if (be_check_error(adapter, BE_ERROR_HW))
return;
if (lancer_chip(adapter)) {
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ be_set_error(adapter, BE_ERROR_UE);
sliport_err1 = ioread32(adapter->db +
SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
SLIPORT_ERROR2_OFFSET);
- adapter->hw_error = true;
- error_detected = true;
/* Do not log error messages if it's a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
@@ -2945,12 +3186,12 @@ void be_detect_error(struct be_adapter *adapter)
*/
if (ue_lo || ue_hi) {
- error_detected = true;
dev_err(dev,
"Unrecoverable Error detected in the adapter");
dev_err(dev, "Please reboot server to recover");
if (skyhawk_chip(adapter))
- adapter->hw_error = true;
+ be_set_error(adapter, BE_ERROR_UE);
+
for (i = 0; ue_lo; ue_lo >>= 1, i++) {
if (ue_lo & 1)
dev_err(dev, "UE: %s bit set\n",
@@ -2963,8 +3204,6 @@ void be_detect_error(struct be_adapter *adapter)
}
}
}
- if (error_detected)
- netif_carrier_off(netdev);
}
static void be_msix_disable(struct be_adapter *adapter)
@@ -3015,7 +3254,7 @@ fail:
dev_warn(dev, "MSIx enable failed\n");
/* INTx is not supported in VFs, so fail probe if enable_msix fails */
- if (!be_physfn(adapter))
+ if (be_virtfn(adapter))
return num_vec;
return 0;
}
@@ -3062,7 +3301,7 @@ static int be_irq_register(struct be_adapter *adapter)
if (status == 0)
goto done;
/* INTx is not supported for VF */
- if (!be_physfn(adapter))
+ if (be_virtfn(adapter))
return status;
}
@@ -3229,9 +3468,12 @@ static int be_rx_qs_create(struct be_adapter *adapter)
memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
- /* First time posting */
+ /* Post 1 less than RXQ-len to avoid head being equal to tail,
+ * which is a queue empty condition
+ */
for_all_rx_queues(adapter, rxo, i)
- be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
+ be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
+
return 0;
}
@@ -3263,7 +3505,7 @@ static int be_open(struct net_device *netdev)
for_all_evt_queues(adapter, eqo, i) {
napi_enable(&eqo->napi);
be_enable_busy_poll(eqo);
- be_eq_notify(adapter, eqo->q.id, true, true, 0);
+ be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
}
adapter->flags |= BE_FLAGS_NAPI_ENABLED;
@@ -3563,7 +3805,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
/* If a FW profile exists, then cap_flags are updated */
cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
for_all_vfs(adapter, vf_cfg, vf) {
if (!BE3_chip(adapter)) {
@@ -3610,6 +3852,7 @@ static int be_vf_setup(struct be_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
struct be_vf_cfg *vf_cfg;
int status, old_vfs, vf;
+ bool spoofchk;
old_vfs = pci_num_vf(adapter->pdev);
@@ -3657,6 +3900,12 @@ static int be_vf_setup(struct be_adapter *adapter)
if (!old_vfs)
be_cmd_config_qos(adapter, 0, 0, vf + 1);
+ status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
+ vf_cfg->if_handle, NULL,
+ &spoofchk);
+ if (!status)
+ vf_cfg->spoofchk = spoofchk;
+
if (!old_vfs) {
be_cmd_enable_vf(adapter, vf + 1);
be_cmd_set_logical_link_config(adapter,
@@ -3733,8 +3982,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
* *only* if it is RSS-capable.
*/
if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
- !be_physfn(adapter) || (be_is_mc(adapter) &&
- !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
+ be_virtfn(adapter) ||
+ (be_is_mc(adapter) &&
+ !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
res->max_tx_qs = 1;
} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
struct be_resources super_nic_res = {0};
@@ -4075,7 +4325,7 @@ static int be_func_init(struct be_adapter *adapter)
msleep(100);
/* We can clear all errors when function reset succeeds */
- be_clear_all_error(adapter);
+ be_clear_error(adapter, BE_CLEAR_ALL);
}
/* Tell FW we're ready to fire cmds */
@@ -4182,7 +4432,7 @@ static void be_netpoll(struct net_device *netdev)
int i;
for_all_evt_queues(adapter, eqo, i) {
- be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+ be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
napi_schedule(&eqo->napi);
}
}
@@ -4666,14 +4916,11 @@ static int lancer_fw_download(struct be_adapter *adapter,
return 0;
}
-#define BE2_UFI 2
-#define BE3_UFI 3
-#define BE3R_UFI 10
-#define SH_UFI 4
-#define SH_P2_UFI 11
-
-static int be_get_ufi_type(struct be_adapter *adapter,
- struct flash_file_hdr_g3 *fhdr)
+/* Check if the flash image file is compatible with the adapter that
+ * is being flashed.
+ */
+static bool be_check_ufi_compatibility(struct be_adapter *adapter,
+ struct flash_file_hdr_g3 *fhdr)
{
if (!fhdr) {
dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
@@ -4685,43 +4932,22 @@ static int be_get_ufi_type(struct be_adapter *adapter,
*/
switch (fhdr->build[0]) {
case BLD_STR_UFI_TYPE_SH:
- return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
- SH_UFI;
+ if (!skyhawk_chip(adapter))
+ return false;
+ break;
case BLD_STR_UFI_TYPE_BE3:
- return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
- BE3_UFI;
+ if (!BE3_chip(adapter))
+ return false;
+ break;
case BLD_STR_UFI_TYPE_BE2:
- return BE2_UFI;
- default:
- return -1;
- }
-}
-
-/* Check if the flash image file is compatible with the adapter that
- * is being flashed.
- * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
- * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
- */
-static bool be_check_ufi_compatibility(struct be_adapter *adapter,
- struct flash_file_hdr_g3 *fhdr)
-{
- int ufi_type = be_get_ufi_type(adapter, fhdr);
-
- switch (ufi_type) {
- case SH_P2_UFI:
- return skyhawk_chip(adapter);
- case SH_UFI:
- return (skyhawk_chip(adapter) &&
- adapter->asic_rev < ASIC_REV_P2);
- case BE3R_UFI:
- return BE3_chip(adapter);
- case BE3_UFI:
- return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
- case BE2_UFI:
- return BE2_chip(adapter);
+ if (!BE2_chip(adapter))
+ return false;
+ break;
default:
return false;
}
+
+ return (fhdr->asic_type_rev >= adapter->asic_rev);
}
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
@@ -4829,7 +5055,7 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
adapter->if_handle,
mode == BRIDGE_MODE_VEPA ?
PORT_FWD_TYPE_VEPA :
- PORT_FWD_TYPE_VEB);
+ PORT_FWD_TYPE_VEB, 0);
if (status)
goto err;
@@ -4861,7 +5087,8 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
hsw_mode = PORT_FWD_TYPE_VEB;
} else {
status = be_cmd_get_hsw_config(adapter, NULL, 0,
- adapter->if_handle, &hsw_mode);
+ adapter->if_handle, &hsw_mode,
+ NULL);
if (status)
return 0;
}
@@ -5014,6 +5241,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_set_vf_rate = be_set_vf_tx_rate,
.ndo_get_vf_config = be_get_vf_config,
.ndo_set_vf_link_state = be_set_vf_link_state,
+ .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
@@ -5118,7 +5346,7 @@ static void be_err_detection_task(struct work_struct *work)
be_detect_error(adapter);
- if (adapter->hw_error) {
+ if (be_check_error(adapter, BE_ERROR_HW)) {
be_cleanup(adapter);
/* As of now error recovery support is in Lancer only */
@@ -5182,7 +5410,9 @@ static void be_worker(struct work_struct *work)
be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
}
- be_eqd_update(adapter);
+ /* EQ-delay update for Skyhawk is done while notifying EQ */
+ if (!skyhawk_chip(adapter))
+ be_eqd_update(adapter, false);
if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
be_log_sfp_info(adapter);
@@ -5202,7 +5432,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
static int db_bar(struct be_adapter *adapter)
{
- if (lancer_chip(adapter) || !be_physfn(adapter))
+ if (lancer_chip(adapter) || be_virtfn(adapter))
return 0;
else
return 4;
@@ -5381,6 +5611,30 @@ static void be_remove(struct pci_dev *pdev)
free_netdev(adapter->netdev);
}
+static ssize_t be_hwmon_show_temp(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct be_adapter *adapter = dev_get_drvdata(dev);
+
+ /* Unit: millidegree Celsius */
+ if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
+ return -EIO;
+ else
+ return sprintf(buf, "%u\n",
+ adapter->hwmon_info.be_on_die_temp * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ be_hwmon_show_temp, NULL, 1);
+
+static struct attribute *be_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(be_hwmon);
+
static char *mc_name(struct be_adapter *adapter)
{
char *str = ""; /* default */
@@ -5500,6 +5754,16 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
be_schedule_err_detection(adapter);
+ /* On-die temperature is not supported for VFs. */
+ if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
+ adapter->hwmon_info.hwmon_dev =
+ devm_hwmon_device_register_with_groups(&pdev->dev,
+ DRV_NAME,
+ adapter,
+ be_hwmon_groups);
+ adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
+ }
+
dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
func_name(adapter), mc_name(adapter), adapter->port_name);
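
Once registered, the sensor appears under /sys/class/hwmon/ as temp1_input, reporting millidegrees Celsius (hence the * 1000 in be_hwmon_show_temp() above). A small userspace sketch; the hwmon index is a placeholder, since it is assigned at registration time:

#include <stdio.h>

int main(void)
{
	unsigned int mdeg;
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &mdeg) != 1) {
		fclose(f);
		return 1;
	}
	printf("die temperature: %u.%03u C\n", mdeg / 1000, mdeg % 1000);
	fclose(f);
	return 0;
}
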
@@ -5592,8 +5856,8 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
dev_err(&adapter->pdev->dev, "EEH error detected\n");
- if (!adapter->eeh_error) {
- adapter->eeh_error = true;
+ if (!be_check_error(adapter, BE_ERROR_EEH)) {
+ be_set_error(adapter, BE_ERROR_EEH);
be_cancel_err_detection(adapter);
@@ -5640,7 +5904,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
pci_cleanup_aer_uncorrect_error_status(pdev);
- be_clear_all_error(adapter);
+ be_clear_error(adapter, BE_CLEAR_ALL);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 132866433a25..60368207bf58 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index e6f7eb1a7d87..cde6ef905ec4 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 66d47e448e4d..bf4cf3fbb5f2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2118,6 +2118,82 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
+static int fec_enet_get_regs_len(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct resource *r;
+ int s = 0;
+
+ r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
+ if (r)
+ s = resource_size(r);
+
+ return s;
+}
+
+/* List of registers that can safely be read to dump them with ethtool */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+ defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+static u32 fec_enet_register_offset[] = {
+ FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+ FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+ FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
+ FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
+ FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
+ FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
+ FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
+ FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
+ FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+ FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
+ FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
+ FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
+ RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+ RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+ RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+ RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+ RMON_T_P_GTE2048, RMON_T_OCTETS,
+ IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+ IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+ IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+ RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+ RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+ RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+ RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+ RMON_R_P_GTE2048, RMON_R_OCTETS,
+ IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+ IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
+#else
+static u32 fec_enet_register_offset[] = {
+ FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
+ FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
+ FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
+ FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
+ FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
+ FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
+ FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
+ FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
+ FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
+};
+#endif
+
+static void fec_enet_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *regbuf)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+ u32 *buf = (u32 *)regbuf;
+ u32 i, off;
+
+ memset(buf, 0, regs->len);
+
+ for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
+ off = fec_enet_register_offset[i] / 4;
+ buf[off] = readl(&theregs[off]);
+ }
+}
+
static int fec_enet_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
@@ -2515,6 +2591,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
+ .get_regs_len = fec_enet_get_regs_len,
+ .get_regs = fec_enet_get_regs,
.nway_reset = fec_enet_nway_reset,
.get_link = ethtool_op_get_link,
.get_coalesce = fec_enet_get_coalesce,
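
With get_regs_len and get_regs wired into fec_enet_ethtool_ops, the dump is reachable through the standard ETHTOOL_GREGS ioctl (what "ethtool -d" issues). A userspace sketch of the raw path; the interface name and buffer size are placeholders, and error handling is abbreviated:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_regs *regs;
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	regs = calloc(1, sizeof(*regs) + 4096);	/* >= get_regs_len() */
	regs->cmd = ETHTOOL_GREGS;
	regs->len = 4096;
	ifr.ifr_data = (void *)regs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("dumped %u bytes of registers\n", regs->len);
	free(regs);
	return 0;
}
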
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a583d89b13c4..a15663ad7f5e 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -353,6 +353,7 @@ static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
writel(tmp, fep->hwp + FEC_ATIME_INC);
+ corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
writel(corr_period, fep->hwp + FEC_ATIME_CORR);
/* dummy read to update the timer. */
timecounter_read(&fep->tc);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 9b3639eae676..c5f299d74549 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -86,7 +86,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
struct net_device *dev = fep->ndev;
const struct fs_platform_info *fpi = fep->fpi;
cbd_t __iomem *bdp;
- struct sk_buff *skb, *skbn, *skbt;
+ struct sk_buff *skb, *skbn;
int received = 0;
u16 pkt_len, sc;
int curidx;
@@ -161,10 +161,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
skb_reserve(skbn, 2); /* align IP header */
skb_copy_from_linear_data(skb,
skbn->data, pkt_len);
- /* swap */
- skbt = skb;
- skb = skbn;
- skbn = skbt;
+ swap(skb, skbn);
}
} else {
skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4ee080d49bc0..ff875028fdff 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -516,6 +516,15 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
return &dev->stats;
}
+static int gfar_set_mac_addr(struct net_device *dev, void *p)
+{
+ eth_mac_addr(dev, p);
+
+ gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+
+ return 0;
+}
+
static const struct net_device_ops gfar_netdev_ops = {
.ndo_open = gfar_enet_open,
.ndo_start_xmit = gfar_start_xmit,
@@ -526,7 +535,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_get_stats = gfar_get_stats,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = gfar_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = gfar_netpoll,
@@ -1411,6 +1420,8 @@ static int gfar_probe(struct platform_device *ofdev)
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
gfar_init_addr_hash_table(priv);
/* Insert receive time stamps into padding alignment bytes */
@@ -2254,7 +2265,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rq = 0;
int do_tstamp, do_csum, do_vlan;
u32 bufaddr;
- unsigned long flags;
unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
rq = skb->queue_mapping;
@@ -2434,19 +2444,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(txq, bytes_sent);
- /* We can work in parallel with gfar_clean_tx_ring(), except
- * when modifying num_txbdfree. Note that we didn't grab the lock
- * when we were reading the num_txbdfree and checking for available
- * space, that's because outside of this function it can only grow,
- * and once we've got needed space, it cannot suddenly disappear.
- *
- * The lock also protects us from gfar_error(), which can modify
- * regs->tstat and thus retrigger the transfers, which is why we
- * also must grab the lock before setting ready bit for the first
- * to be transmitted BD.
- */
- spin_lock_irqsave(&tx_queue->txlock, flags);
-
gfar_wmb();
txbdp_start->lstatus = cpu_to_be32(lstatus);
@@ -2463,8 +2460,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ /* We can work in parallel with gfar_clean_tx_ring(), except
+ * when modifying num_txbdfree. Note that we didn't grab the lock
+ * when we were reading the num_txbdfree and checking for available
+ * space, that's because outside of this function it can only grow.
+ */
+ spin_lock_bh(&tx_queue->txlock);
/* reduce TxBD free count */
tx_queue->num_txbdfree -= (nr_txbds);
+ spin_unlock_bh(&tx_queue->txlock);
/* If the next BD still needs to be cleaned up, then the bds
* are full. We need to tell the kernel to stop sending us stuff.
@@ -2478,9 +2482,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Tell the DMA to go go go */
gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
- /* Unlock priv */
- spin_unlock_irqrestore(&tx_queue->txlock, flags);
-
return NETDEV_TX_OK;
dma_map_err:
@@ -2622,7 +2623,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_dirtytx = tx_queue->skb_dirtytx;
while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
- unsigned long flags;
frags = skb_shinfo(skb)->nr_frags;
@@ -2686,9 +2686,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
TX_RING_MOD_MASK(tx_ring_size);
howmany++;
- spin_lock_irqsave(&tx_queue->txlock, flags);
+ spin_lock(&tx_queue->txlock);
tx_queue->num_txbdfree += nr_txbds;
- spin_unlock_irqrestore(&tx_queue->txlock, flags);
+ spin_unlock(&tx_queue->txlock);
}
/* If we freed a buffer, we can restart transmission, if necessary */
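The locking conversion in the two hunks above works because txlock is now taken only from ndo_start_xmit (process context, so spin_lock_bh() keeps the softirq off this CPU) and from the NAPI cleanup path (already softirq context, so a plain spin_lock() suffices); the gfar_error() path that used to take it from hard-IRQ context is removed in the next hunk. Schematically, assuming that context split:

/* ndo_start_xmit, process context */
spin_lock_bh(&tx_queue->txlock);
tx_queue->num_txbdfree -= nr_txbds;
spin_unlock_bh(&tx_queue->txlock);

/* NAPI poll (gfar_clean_tx_ring), softirq context */
spin_lock(&tx_queue->txlock);
tx_queue->num_txbdfree += nr_txbds;
spin_unlock(&tx_queue->txlock);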
@@ -3411,21 +3411,12 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
if (events & IEVENT_CRL)
dev->stats.tx_aborted_errors++;
if (events & IEVENT_XFUN) {
- unsigned long flags;
-
netif_dbg(priv, tx_err, dev,
"TX FIFO underrun, packet dropped\n");
dev->stats.tx_dropped++;
atomic64_inc(&priv->extra_stats.tx_underrun);
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- /* Reactivate the Tx Queues */
- gfar_write(&regs->tstat, gfargrp->tstat);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
+ schedule_work(&priv->reset_task);
}
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
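Replacing the register pokes with schedule_work() defers the recovery to process context; per the existing driver code, reset_task is the work item already used for transmit-timeout recovery. The general shape of the idiom:

/* at probe time */
INIT_WORK(&priv->reset_task, gfar_reset_task);

/* in the hard-IRQ error handler: do the minimum, defer the rest */
schedule_work(&priv->reset_task);

/* the work function later runs in process context and may sleep */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	/* ... stop, reset and restart the device ... */
}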
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 3b39fdddeb57..d49bee38cd31 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -798,7 +798,7 @@ static void hip04_free_ring(struct net_device *ndev, struct device *d)
for (i = 0; i < RX_DESC_NUM; i++)
if (priv->rx_buf[i])
- put_page(virt_to_head_page(priv->rx_buf[i]));
+ skb_free_frag(priv->rx_buf[i]);
for (i = 0; i < TX_DESC_NUM; i++)
if (priv->tx_skb[i])
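skb_free_frag() (this hunk, and the e1000 hunks further down) is the dedicated free routine for buffers obtained from the page-fragment allocators, replacing the open-coded put_page(virt_to_head_page(...)). The intended pairing, as a minimal sketch with a hypothetical size constant:

void *buf = netdev_alloc_frag(FOO_RX_BUF_SIZE);	/* FOO_RX_BUF_SIZE: hypothetical */

if (!buf)
	return -ENOMEM;
/* ... map for DMA, post to the NIC, receive into it ... */
skb_free_frag(buf);	/* drops the page-fragment reference */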
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 0ffdcd381fdd..a5e077eac99a 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -500,7 +500,6 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
napi_gro_receive(&priv->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
- dev->last_rx = jiffies;
next:
pos = dma_ring_incr(pos, RX_DESC_NUM);
}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index b9df0cbd0a38..b60a34d982a9 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2999,7 +2999,7 @@ static struct platform_driver emac_driver = {
static void __init emac_make_bootlist(void)
{
struct device_node *np = NULL;
- int j, max, i = 0, k;
+ int j, max, i = 0;
int cell_indices[EMAC_BOOT_LIST_SIZE];
/* Collect EMACs */
@@ -3026,12 +3026,8 @@ static void __init emac_make_bootlist(void)
for (i = 0; max > 1 && (i < (max - 1)); i++)
for (j = i; j < max; j++) {
if (cell_indices[i] > cell_indices[j]) {
- np = emac_boot_list[i];
- emac_boot_list[i] = emac_boot_list[j];
- emac_boot_list[j] = np;
- k = cell_indices[i];
- cell_indices[i] = cell_indices[j];
- cell_indices[j] = k;
+ swap(emac_boot_list[i], emac_boot_list[j]);
+ swap(cell_indices[i], cell_indices[j]);
}
}
}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 18134766a114..29bbb628d712 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -58,7 +58,7 @@ static struct kobj_type ktype_veth_pool;
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.04"
+#define ibmveth_driver_version "1.05"
MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
@@ -100,6 +100,8 @@ struct ibmveth_stat ibmveth_stats[] = {
{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
+ { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
+ { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
};
/* simple methods of getting data from the current rxq entry */
@@ -852,6 +854,10 @@ static int ibmveth_set_features(struct net_device *dev,
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rx_csum = !!(features & NETIF_F_RXCSUM);
int rc;
+ netdev_features_t changed = features ^ dev->features;
+
+ if (features & NETIF_F_TSO & changed)
+ netdev_info(dev, "TSO feature requires all partitions to have updated driver");
if (rx_csum == adapter->rx_csum)
return 0;
@@ -1035,6 +1041,15 @@ retry_bounce:
descs[i+1].fields.address = dma_addr;
}
+ if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
+ /* Put -1 in the IP checksum to tell phyp this is a
+ * largesend packet, and put the MSS in the TCP checksum field.
+ */
+ ip_hdr(skb)->check = 0xffff;
+ tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ adapter->tx_large_packets++;
+ }
+
if (ibmveth_send(adapter, descs)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
@@ -1080,6 +1095,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
struct net_device *netdev = adapter->netdev;
int frames_processed = 0;
unsigned long lpar_rc;
+ struct iphdr *iph;
restart_poll:
while (frames_processed < budget) {
@@ -1122,10 +1138,23 @@ restart_poll:
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
- if (csum_good)
+ if (csum_good) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
+ iph = (struct iphdr *)skb->data;
+
+ /* If the IP checksum is not offloaded and the packet
+ * is a largesend, the checksum must be rebuilt.
+ */
+ if (iph->check == 0xffff) {
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ adapter->rx_large_packets++;
+ }
+ }
+ }
- netif_receive_skb(skb); /* send it up */
+ napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += length;
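Switching from netif_receive_skb() to napi_gro_receive() lets the stack coalesce consecutive TCP segments before they travel up — a natural receive-side companion to the largesend transmit support added above. A minimal poll-handler sketch with a hypothetical foo_next_rx_skb() helper:

static int foo_poll(struct napi_struct *napi, int budget)
{
	int done = 0;
	struct sk_buff *skb;

	while (done < budget && (skb = foo_next_rx_skb(napi))) {
		napi_gro_receive(napi, skb);	/* GRO-aggregating hand-off */
		done++;
	}
	if (done < budget)
		napi_complete(napi);
	return done;
}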
@@ -1422,8 +1451,14 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features |= netdev->hw_features;
+ /* TSO is disabled by default */
+ netdev->hw_features |= NETIF_F_TSO;
+
memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
+ if (firmware_has_feature(FW_FEATURE_CMO))
+ memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
+
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
int error;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 1f37499d4398..41dedb1fb2ae 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -104,7 +104,8 @@ static inline long h_illan_attributes(unsigned long unit_address,
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
-static int pool_active[] = { 1, 1, 0, 0, 0};
+static int pool_count_cmo[] = { 256, 512, 256, 256, 64 };
+static int pool_active[] = { 1, 1, 0, 0, 1};
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
@@ -160,6 +161,8 @@ struct ibmveth_adapter {
u64 rx_no_buffer;
u64 tx_map_failed;
u64 tx_send_failed;
+ u64 tx_large_packets;
+ u64 rx_large_packets;
};
/*
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 1a450f4b6b12..d2657a412768 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -874,7 +874,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
{
struct cb *cb;
unsigned long flags;
- int err = 0;
+ int err;
spin_lock_irqsave(&nic->cb_lock, flags);
@@ -2922,9 +2922,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- init_timer(&nic->watchdog);
- nic->watchdog.function = e100_watchdog;
- nic->watchdog.data = (unsigned long)nic;
+ setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);
INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
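setup_timer() folds the three-step init_timer()/function/data sequence into one call (later kernels replaced this family with timer_setup(), but that is beyond this tree). Side by side:

/* before */
init_timer(&nic->watchdog);
nic->watchdog.function = e100_watchdog;
nic->watchdog.data = (unsigned long)nic;

/* after */
setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);

/* arming the timer is unchanged either way */
mod_timer(&nic->watchdog, jiffies + HZ);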
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 983eb4e6f7aa..74dc15055971 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2079,11 +2079,6 @@ static void *e1000_alloc_frag(const struct e1000_adapter *a)
return data;
}
-static void e1000_free_frag(const void *data)
-{
- put_page(virt_to_head_page(data));
-}
-
/**
* e1000_clean_rx_ring - Free Rx Buffers per Queue
* @adapter: board private structure
@@ -2107,7 +2102,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (buffer_info->rxbuf.data) {
- e1000_free_frag(buffer_info->rxbuf.data);
+ skb_free_frag(buffer_info->rxbuf.data);
buffer_info->rxbuf.data = NULL;
}
} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
@@ -4594,28 +4589,28 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
data = e1000_alloc_frag(adapter);
/* Failed allocation, critical failure */
if (!data) {
- e1000_free_frag(olddata);
+ skb_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
break;
}
if (!e1000_check_64k_bound(adapter, data, bufsz)) {
/* give up */
- e1000_free_frag(data);
- e1000_free_frag(olddata);
+ skb_free_frag(data);
+ skb_free_frag(olddata);
adapter->alloc_rx_buff_failed++;
break;
}
/* Use new allocation */
- e1000_free_frag(olddata);
+ skb_free_frag(olddata);
}
buffer_info->dma = dma_map_single(&pdev->dev,
data,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- e1000_free_frag(data);
+ skb_free_frag(data);
buffer_info->dma = 0;
adapter->alloc_rx_buff_failed++;
break;
@@ -4637,7 +4632,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
- e1000_free_frag(data);
+ skb_free_frag(data);
buffer_info->rxbuf.data = NULL;
buffer_info->dma = 0;
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 08f22f348800..2af603f3e418 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index 535a9430976d..a2162e11673e 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index dc79ed85030b..5f7016442ec4 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -2010,7 +2010,7 @@ const struct e1000_info e1000_82573_info = {
.flags2 = FLAG2_DISABLE_ASPM_L1
| FLAG2_DISABLE_ASPM_L0S,
.pba = 20,
- .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+ .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_m88,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 2e758f796d60..abc6a9abff98 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 0570c668ec3d..133d4074dbe4 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 0abc942c966e..0b748d1959d9 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,8 @@ struct e1000_info;
#define DEFAULT_RADV 8
#define BURST_RDTR 0x20
#define BURST_RADV 0x20
+#define PCICFG_DESC_RING_STATUS 0xe4
+#define FLUSH_DESC_REQUIRED 0x100
/* in the case of WTHRESH, it appears at least the 82571/2 hardware
* writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
@@ -384,6 +386,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define INCVALUE_SHIFT_25MHz 18
#define INCPERIOD_25MHz 1
+#define INCVALUE_24MHz 125
+#define INCVALUE_SHIFT_24MHz 14
+#define INCPERIOD_24MHz 3
+
/* Another drawback of scaling the incvalue by a large factor is the
* 64-bit SYSTIM register overflows more quickly. This is dealt with
* by simply reading the clock before it overflows.
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 11f486e4ff7b..ad6daa656d3e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1516,8 +1516,19 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
-
+ u32 rctl, fext_nvm11, tarc0;
+
+ if (hw->mac.type == e1000_pch_spt) {
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ tarc0 = er32(TARC(0));
+ /* clear bits 28 & 29 (control of MULR concurrent requests) */
+ tarc0 &= 0xcfffffff;
+ /* set bit 29 (value of MULR requests is now 2) */
+ tarc0 |= 0x20000000;
+ ew32(TARC(0), tarc0);
+ }
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
switch (hw->mac.type) {
@@ -1542,7 +1553,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
+ u32 rctl, fext_nvm11, tarc0;
u16 phy_reg;
rctl = er32(RCTL);
@@ -1550,6 +1561,16 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
ew32(RCTL, rctl);
switch (hw->mac.type) {
+ case e1000_pch_spt:
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ tarc0 = er32(TARC(0));
+ /* clear bits 28 & 29 (control of MULR concurrent requests) */
+ /* set bit 29 (value of MULR requests is now 0) */
+ tarc0 &= 0xcfffffff;
+ ew32(TARC(0), tarc0);
+ /* fall through */
case e1000_80003es2lan:
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 19e8c487db06..c9da4654e9ca 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9d81c0317433..b074b9a667b3 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1014,8 +1014,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
u16 speed, duplex, scale = 0;
u16 max_snoop, max_nosnoop;
u16 max_ltr_enc; /* max LTR latency encoded */
- s64 lat_ns; /* latency (ns) */
- s64 value;
+ u64 value;
u32 rxa;
if (!hw->adapter->max_frame_size) {
@@ -1040,14 +1039,11 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
* 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
* 1=2^5ns, 2=2^10ns,...5=2^25ns.
*/
- lat_ns = ((s64)rxa * 1024 -
- (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
- if (lat_ns < 0)
- lat_ns = 0;
- else
- do_div(lat_ns, speed);
+ rxa *= 512;
+ value = (rxa > hw->adapter->max_frame_size) ?
+ (rxa - hw->adapter->max_frame_size) * (16000 / speed) :
+ 0;
- value = lat_ns;
while (value > PCI_LTR_VALUE_MASK) {
scale++;
value = DIV_ROUND_UP(value, (1 << 5));
@@ -1563,7 +1559,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
((adapter->hw.mac.type >= e1000_pch2lan) &&
(!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
- adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
+ adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
hw->mac.ops.blink_led = NULL;
}
@@ -5681,7 +5677,7 @@ const struct e1000_info e1000_ich8_info = {
| FLAG_HAS_FLASH
| FLAG_APME_IN_WUC,
.pba = 8,
- .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+ .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
@@ -5754,7 +5750,7 @@ const struct e1000_info e1000_pch2_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = 9018,
+ .max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
@@ -5774,7 +5770,7 @@ const struct e1000_info e1000_pch_lpt_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = 9018,
+ .max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
@@ -5794,7 +5790,7 @@ const struct e1000_info e1000_pch_spt_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = 9018,
+ .max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
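All the 9018 -> 9022 and ETH_FRAME_LEN -> VLAN_ETH_FRAME_LEN edits in this file account for the 4-byte 802.1Q tag in the worst-case frame:

/* standard frame:  1500 payload + 14 header = ETH_FRAME_LEN      (1514) */
/* + VLAN tag:      1514 + VLAN_HLEN (4)     = VLAN_ETH_FRAME_LEN (1518) */
/* + FCS:           1518 + ETH_FCS_LEN (4)   = 1522 on the wire          */
/* jumbo limit:     9018 + 4 (VLAN tag)      = 9022                      */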
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 770a573b9eea..26459853c6be 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -98,8 +98,15 @@
#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000
/* bit for disabling packet buffer read */
#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
-
+#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004
#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800
+#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000
+#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200
+#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000
+
+/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
#define K1_ENTRY_LATENCY 0
#define K1_MIN_TIME 1
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 30b74d590bee..e59d7c283cd4 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 0513d90cdeea..8284618af9ff 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 06edfca1a35e..cc9b3befc2bc 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index a8c27f98f7b0..0b9ea5952b07 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index c509a5c900f5..e62b9dcb91fe 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -48,7 +48,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -3525,22 +3525,30 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
switch (hw->mac.type) {
case e1000_pch2lan:
case e1000_pch_lpt:
- case e1000_pch_spt:
- /* On I217, I218 and I219, the clock frequency is 25MHz
- * or 96MHz as indicated by the System Clock Frequency
- * Indication
- */
- if (((hw->mac.type != e1000_pch_lpt) &&
- (hw->mac.type != e1000_pch_spt)) ||
- (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 96MHz frequency */
incperiod = INCPERIOD_96MHz;
incvalue = INCVALUE_96MHz;
shift = INCVALUE_SHIFT_96MHz;
adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ } else {
+ /* Stable 25MHz frequency */
+ incperiod = INCPERIOD_25MHz;
+ incvalue = INCVALUE_25MHz;
+ shift = INCVALUE_SHIFT_25MHz;
+ adapter->cc.shift = shift;
+ }
+ break;
+ case e1000_pch_spt:
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
+ /* Stable 24MHz frequency */
+ incperiod = INCPERIOD_24MHz;
+ incvalue = INCVALUE_24MHz;
+ shift = INCVALUE_SHIFT_24MHz;
+ adapter->cc.shift = shift;
break;
}
- /* fall-through */
+ return -EINVAL;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
@@ -3788,6 +3796,108 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
}
/**
+ * e1000_flush_tx_ring - remove all descriptors from the tx_ring
+ *
+ * We want to clear all pending descriptors from the TX ring.
+ * Zeroing happens when the HW reads the regs. We assign the ring itself as
+ * the data of the next descriptor. We don't care about the data; we are
+ * about to reset the HW.
+ */
+static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc = NULL;
+ u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
+ u16 size = 512;
+
+ tctl = er32(TCTL);
+ ew32(TCTL, tctl | E1000_TCTL_EN);
+ tdt = er32(TDT(0));
+ BUG_ON(tdt != tx_ring->next_to_use);
+ tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
+ tx_desc->buffer_addr = tx_ring->dma;
+
+ tx_desc->lower.data = cpu_to_le32(txd_lower | size);
+ tx_desc->upper.data = 0;
+ /* flush descriptors to memory before notifying the HW */
+ wmb();
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+ ew32(TDT(0), tx_ring->next_to_use);
+ mmiowb();
+ usleep_range(200, 250);
+}
+
+/**
+ * e1000_flush_rx_ring - remove all descriptors from the rx_ring
+ *
+ * Mark all descriptors in the RX ring as consumed and disable the rx ring
+ */
+static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
+{
+ u32 rctl, rxdctl;
+ struct e1000_hw *hw = &adapter->hw;
+
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e1e_flush();
+ usleep_range(100, 150);
+
+ rxdctl = er32(RXDCTL(0));
+ /* zero the lower 14 bits (prefetch and host thresholds) */
+ rxdctl &= 0xffffc000;
+
+ /* update thresholds: prefetch threshold to 31, host threshold to 1
+ * and make sure the granularity is "descriptors" and not "cache lines"
+ */
+ rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+
+ ew32(RXDCTL(0), rxdctl);
+ /* momentarily enable the RX ring for the changes to take effect */
+ ew32(RCTL, rctl | E1000_RCTL_EN);
+ e1e_flush();
+ usleep_range(100, 150);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+}
+
+/**
+ * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
+ *
+ * In i219, the descriptor rings must be emptied before resetting the HW
+ * or before changing the device state to D3 during runtime (runtime PM).
+ *
+ * Failure to do this will cause the HW to enter a unit hang state which can
+ * only be released by a PCI reset on the device.
+ */
+
+static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
+{
+ u16 hang_state;
+ u32 fext_nvm11, tdlen;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* First, disable MULR fix in FEXTNVM11 */
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ /* do nothing if we're not in a faulty state, or if the queue is empty */
+ tdlen = er32(TDLEN(0));
+ pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+ &hang_state);
+ if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
+ return;
+ e1000_flush_tx_ring(adapter);
+ /* recheck, maybe the fault is caused by the rx ring */
+ pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+ &hang_state);
+ if (hang_state & FLUSH_DESC_REQUIRED)
+ e1000_flush_rx_ring(adapter);
+}
+
+/**
* e1000e_reset - bring the hardware into a known good state
*
* This function boots the hardware and enables some settings that
@@ -3807,7 +3917,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* reset Packet Buffer Allocation to default */
ew32(PBA, pba);
- if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+ if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) {
/* To maintain wire speed transmits, the Tx FIFO should be
* large enough to accommodate two full transmit packets,
* rounded up to the next 1KB and expressed in KB. Likewise,
@@ -3943,6 +4053,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
}
}
+ if (hw->mac.type == e1000_pch_spt)
+ e1000_flush_desc_rings(adapter);
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
@@ -4016,6 +4128,20 @@ void e1000e_reset(struct e1000_adapter *adapter)
phy_data &= ~IGP02E1000_PM_SPD;
e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
}
+ if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) {
+ u32 reg;
+
+ /* Fextnvm7 @ 0xe4[2] = 1 */
+ reg = er32(FEXTNVM7);
+ reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE;
+ ew32(FEXTNVM7, reg);
+ /* Fextnvm9 @ 0x5bb4[13:12] = 11 */
+ reg = er32(FEXTNVM9);
+ reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS |
+ E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS;
+ ew32(FEXTNVM9, reg);
+ }
+
}
int e1000e_up(struct e1000_adapter *adapter)
@@ -4115,8 +4241,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
spin_unlock(&adapter->stats64_lock);
e1000e_flush_descriptors(adapter);
- e1000_clean_tx_ring(adapter->tx_ring);
- e1000_clean_rx_ring(adapter->rx_ring);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -4127,8 +4251,14 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
e1000_lv_jumbo_workaround_ich8lan(hw, false))
e_dbg("failed to disable jumbo frame workaround mode\n");
- if (reset && !pci_channel_offline(adapter->pdev))
- e1000e_reset(adapter);
+ if (!pci_channel_offline(adapter->pdev)) {
+ if (reset)
+ e1000e_reset(adapter);
+ else if (hw->mac.type == e1000_pch_spt)
+ e1000_flush_desc_rings(adapter);
+ }
+ e1000_clean_tx_ring(adapter->tx_ring);
+ e1000_clean_rx_ring(adapter->rx_ring);
}
void e1000e_reinit_locked(struct e1000_adapter *adapter)
@@ -4151,9 +4281,16 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
cc);
struct e1000_hw *hw = &adapter->hw;
cycle_t systim, systim_next;
+ /* SYSTIMH latching upon SYSTIML read does not work well. To fix that,
+ * we don't want to allow an overflow of SYSTIML and a change to SYSTIMH
+ * to occur between the reads, so if we read a value close to overflow, we
+ * wait for the overflow to occur and read both registers when it's safe.
+ */
+ u32 systim_overflow_latch_fix = 0x3FFFFFFF;
- /* latch SYSTIMH on read of SYSTIML */
- systim = (cycle_t)er32(SYSTIML);
+ do {
+ systim = (cycle_t)er32(SYSTIML);
+ } while (systim > systim_overflow_latch_fix);
systim |= (cycle_t)er32(SYSTIMH) << 32;
if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
@@ -4196,9 +4333,9 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+ adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
adapter->rx_ps_bsize0 = 128;
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
adapter->tx_ring_count = E1000_DEFAULT_TXD;
adapter->rx_ring_count = E1000_DEFAULT_RXD;
@@ -5781,17 +5918,17 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
+ int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
- if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) &&
!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
/* Supported frame sizes */
- if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
+ if ((new_mtu < (VLAN_ETH_ZLEN + ETH_FCS_LEN)) ||
(max_frame > adapter->max_hw_frame_size)) {
e_err("Unsupported MTU setting\n");
return -EINVAL;
@@ -5831,10 +5968,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = 4096;
/* adjust allocation if LPE protects us, and we aren't using SBP */
- if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
- (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
- adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
- + ETH_FCS_LEN;
+ if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN))
+ adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
if (netif_running(netdev))
e1000e_up(adapter);
@@ -6678,6 +6813,19 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
}
}
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
+ if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
+ features &= ~NETIF_F_RXFCS;
+
+ return features;
+}
+
static int e1000_set_features(struct net_device *netdev,
netdev_features_t features)
{
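The new ndo_fix_features hook runs whenever the feature set is about to change and lets the driver strip combinations the hardware cannot honor — here, RXFCS while the >1500-MTU CRC-stripping workaround is active. Drivers typically pair it with a call to the core when a constraint such as the MTU changes, e.g.:

/* after changing a constraint (e.g. the MTU), have the core
 * re-run ndo_fix_features and apply the adjusted feature set
 */
netdev_update_features(netdev);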
@@ -6734,6 +6882,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_poll_controller = e1000_netpoll,
#endif
.ndo_set_features = e1000_set_features,
+ .ndo_fix_features = e1000_fix_features,
};
/**
@@ -7289,7 +7438,7 @@ static int __init e1000_init_module(void)
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index fa6b1036a327..49f205c023bf 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 342bf69efab5..5d46967e0d1f 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index aa1923f7ebdd..6d8c39abee16 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b2005e13fb01..de13aeacae97 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 537d2780b408..55bfe473514d 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 8d7b21dc7e19..25a0ad5102d6 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 85eefc4832ba..b24e5fee17f2 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,8 @@
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 5d47307121ab..ec76c3fa3a04 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -182,6 +182,7 @@ struct i40e_lump_tracking {
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
I40E_FD_STAT_SB,
+ I40E_FD_STAT_ATR_TUNNEL,
I40E_FD_STAT_PF_COUNT
};
#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
@@ -189,6 +190,8 @@ enum i40e_fd_stat_idx {
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
#define I40E_FD_SB_STAT_IDX(pf_id) \
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
struct i40e_fdir_filter {
struct hlist_node fdir_node;
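With the tunnel entry added, each PF owns a block of three consecutive counters, and the macros reduce to base-plus-offset arithmetic; for pf_id = 2, for example:

/* I40E_FD_STAT_PF_COUNT == 3 (ATR, SB, ATR_TUNNEL) */
I40E_FD_STAT_PF_IDX(2)          /* 2 * 3 = 6          */
I40E_FD_ATR_STAT_IDX(2)         /* 6 + 0 (ATR)    = 6 */
I40E_FD_SB_STAT_IDX(2)          /* 6 + 1 (SB)     = 7 */
I40E_FD_ATR_TUNNEL_STAT_IDX(2)  /* 6 + 2 (TUNNEL) = 8 */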
@@ -263,8 +266,6 @@ struct i40e_pf {
struct hlist_head fdir_filter_list;
u16 fdir_pf_active_filters;
- u16 fd_sb_cnt_idx;
- u16 fd_atr_cnt_idx;
unsigned long fd_flush_timestamp;
u32 fd_flush_cnt;
u32 fd_add_err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4cbaaeb902c4..9a68c65b17ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -147,6 +147,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+ I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
/* LPI stats */
@@ -1548,6 +1549,17 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data)
return *data;
}
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+ struct i40e_vf *vfs = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+ return true;
+ return false;
+}
+
static void i40e_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
@@ -1560,6 +1572,20 @@ static void i40e_diag_test(struct net_device *netdev,
netif_info(pf, drv, netdev, "offline testing starting\n");
set_bit(__I40E_TESTING, &pf->state);
+
+ if (i40e_active_vfs(pf)) {
+ dev_warn(&pf->pdev->dev,
+ "Please take active VFS offline and restart the adapter before running NIC diagnostics\n");
+ data[I40E_ETH_TEST_REG] = 1;
+ data[I40E_ETH_TEST_EEPROM] = 1;
+ data[I40E_ETH_TEST_INTR] = 1;
+ data[I40E_ETH_TEST_LOOPBACK] = 1;
+ data[I40E_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, &pf->state);
+ goto skip_ol_tests;
+ }
+
/* If the device is online then take it offline */
if (if_running)
/* indicate we're in test mode */
@@ -1605,6 +1631,8 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_LOOPBACK] = 0;
}
+skip_ol_tests:
+
netif_info(pf, drv, netdev, "testing finished\n");
}
@@ -2265,7 +2293,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->pctype = 0;
input->dest_vsi = vsi->id;
input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
- input->cnt_index = pf->fd_sb_cnt_idx;
+ input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
input->flow_type = fsp->flow_type;
input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 1803afeef23e..c8b621e0e7cd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
*
* The FC EOF is converted to the value understood by HW for descriptor
* programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
- * first.
+ * first; that helper already checks for all supported valid EOF values.
**/
static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
{
@@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
case FC_EOF_A:
return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
default:
- /* FIXME: still returns 0 */
- pr_err("Unrecognized EOF %x\n", eof);
- return 0;
+ /* A supported valid EOF must already have been checked by
+ * calling i40e_fcoe_eof_is_supported() first, so this
+ * default case should never be hit.
+ */
+ WARN_ON(1);
+ return -EINVAL;
}
}
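Turning the pr_err()-and-return-0 into WARN_ON(1) plus an error code converts a silently bogus descriptor value into a loud, backtraced signal for a case the calling contract already excludes. The defensive idiom in general form:

default:
	/* unreachable by contract: callers validate the EOF first */
	WARN_ON(1);
	return -EINVAL;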
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5b5bea159bd5..52d7d8b8f1f9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 4
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -772,9 +772,8 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
dcb_cfg = &hw->local_dcbx_config;
- /* See if DCB enabled with PFC TC */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
- !(dcb_cfg->pfc.pfcenable)) {
+ /* Collect Link XOFF stats when PFC is disabled */
+ if (!dcb_cfg->pfc.pfcenable) {
i40e_update_link_xoff_rx(pf);
return;
}
@@ -1097,12 +1096,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_jabber, &nsd->rx_jabber);
/* FDIR stats */
- i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
pf->stat_offsets_loaded,
&osd->fd_atr_match, &nsd->fd_atr_match);
- i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
pf->stat_offsets_loaded,
&osd->fd_sb_match, &nsd->fd_sb_match);
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+ pf->stat_offsets_loaded,
+ &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
val = rd32(hw, I40E_PRTPM_EEE_STAT);
nsd->tx_lpi_status =
@@ -4739,7 +4744,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
pf->fd_add_err = pf->fd_atr_cnt = 0;
if (pf->fd_tcp_rule > 0) {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
pf->fd_tcp_rule = 0;
}
i40e_fdir_filter_restore(vsi);
@@ -5428,7 +5434,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
- dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
/* Wait for some more space to be available to turn on ATR */
@@ -5436,7 +5443,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
}
}
}
@@ -5469,7 +5477,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (!(time_after(jiffies, min_flush_time)) &&
(fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
- dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
disable_atr = true;
}
@@ -5496,7 +5505,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (!disable_atr)
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
- dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
}
}
@@ -7680,12 +7690,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- /* Setup a counter for fd_atr per PF */
- pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- /* Setup a counter for fd_sb per PF */
- pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
} else {
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
@@ -7775,7 +7781,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
pf->fdir_pf_active_filters = 0;
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9d95042d5a0f..9a4f2bc70cd2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
- /* set the timestamp */
- tx_buf->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch.
*/
@@ -283,7 +280,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if (add) {
pf->fd_tcp_rule++;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
} else {
@@ -291,7 +289,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
(pf->fd_tcp_rule - 1) : 0;
if (pf->fd_tcp_rule == 0) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
}
}
@@ -501,7 +500,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
!(pf->auto_disable_flags &
I40E_FLAG_FD_SB_ENABLED)) {
- dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_SB_ENABLED;
}
@@ -807,10 +807,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->vsi->seid,
tx_ring->queue_index,
tx_ring->next_to_use, i);
- dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->tx_bi[i].time_stamp, jiffies);
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@ -1653,9 +1649,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1688,7 +1681,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1821,7 +1813,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
#endif
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1925,11 +1916,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* i40e_atr - Add a Flow Director ATR filter
* @tx_ring: ring to add programming descriptor to
* @skb: send buffer
- * @flags: send flags
+ * @tx_flags: send tx flags
* @protocol: wire protocol
**/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 flags, __be16 protocol)
+ u32 tx_flags, __be16 protocol)
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1954,25 +1945,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!tx_ring->atr_sample_rate)
return;
- /* snag network header to get L4 type and address */
- hdr.network = skb_network_header(skb);
+ if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+ return;
- /* Currently only IPv4/IPv6 with TCP is supported */
- if (protocol == htons(ETH_P_IP)) {
- if (hdr.ipv4->protocol != IPPROTO_TCP)
- return;
+ if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
- /* access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[0] & 0x0F) << 2;
- } else if (protocol == htons(ETH_P_IPV6)) {
- if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+ /* Currently only IPv4/IPv6 with TCP is supported;
+ * access ihl as a u8 to avoid unaligned access on ia64.
+ */
+ if (tx_flags & I40E_TX_FLAGS_IPV4)
+ hlen = (hdr.network[0] & 0x0F) << 2;
+ else if (protocol == htons(ETH_P_IPV6))
+ hlen = sizeof(struct ipv6hdr);
+ else
return;
-
- hlen = sizeof(struct ipv6hdr);
} else {
- return;
+ hdr.network = skb_inner_network_header(skb);
+ hlen = skb_inner_network_header_len(skb);
}
+ /* Currently only IPv4/IPv6 with TCP is supported.
+ * Note: tx_flags gets modified to reflect inner protocols in
+ * the tx_enable_csum function if encap is enabled.
+ */
+ if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+ (hdr.ipv4->protocol != IPPROTO_TCP))
+ return;
+ else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+ (hdr.ipv6->nexthdr != IPPROTO_TCP))
+ return;
+
th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */
@@ -2022,9 +2026,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dtype_cmd |=
- ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+ dtype_cmd |=
+ ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ else
+ dtype_cmd |=
+ ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->rsvd = cpu_to_le32(0);
@@ -2045,13 +2056,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
* otherwise returns 0 to indicate the flags has been set properly.
**/
#ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring,
- u32 *flags)
-#else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring,
u32 *flags)
+#else
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
#endif
{
__be16 protocol = skb->protocol;
@@ -2119,16 +2130,14 @@ out:
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
* @hdr_len: ptr to the size of the packet header
* @cd_tunneling: ptr to context descriptor bits
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol, u8 *hdr_len,
- u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+ u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+ u32 *cd_tunneling)
{
u32 cd_cmd, cd_tso_len, cd_mss;
struct ipv6hdr *ipv6h;
@@ -2220,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
/**
* i40e_tx_enable_csum - Enable Tx checksum offloads
* @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
u32 *td_cmd, u32 *td_offset,
struct i40e_ring *tx_ring,
u32 *cd_tunneling)
@@ -2241,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
default:
return;
@@ -2250,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
this_ipv6_hdr = inner_ipv6_hdr(skb);
this_tcp_hdrlen = inner_tcp_hdrlen(skb);
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
ip_hdr(skb)->check = 0;
} else {
*cd_tunneling |=
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
- if (tx_flags & I40E_TX_FLAGS_TSO)
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
}
@@ -2273,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
if (this_ip_hdr->version == 6) {
- tx_flags &= ~I40E_TX_FLAGS_IPV4;
- tx_flags |= I40E_TX_FLAGS_IPV6;
+ *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
}
} else {
network_hdr_len = skb_network_header_len(skb);
@@ -2284,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
}
/* Enable IP checksum offloads */
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
l4_hdr = this_ip_hdr->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
this_ip_hdr->check = 0;
} else {
@@ -2298,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
/* Now set the td_offset for IP header length */
*td_offset = (network_hdr_len >> 2) <<
I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
l4_hdr = this_ipv6_hdr->nexthdr;
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
/* Now set the td_offset for IP header length */
@@ -2396,9 +2405,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* Returns 0 if stop is not needed
**/
#ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
@@ -2473,13 +2482,13 @@ linearize_chk_done:
* @td_offset: offset for checksum or crc
**/
#ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
unsigned int data_len = skb->data_len;
@@ -2585,9 +2594,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->queue_index),
first->bytecount);
- /* set the timestamp */
- first->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2640,11 +2646,11 @@ dma_error:
* one descriptor.
**/
#ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
-#else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
#endif
{
unsigned int f;
@@ -2706,7 +2712,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ tso = i40e_tso(tx_ring, skb, &hdr_len,
&cd_type_cmd_tso_mss, &cd_tunneling);
if (tso < 0)
@@ -2732,7 +2738,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM;
- i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
}
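Passing tx_flags by pointer lets the checksum path feed information forward: i40e_tx_enable_csum() can now set I40E_TX_FLAGS_VXLAN_TUNNEL (and flip the IPV4/IPV6 bits to match the inner header), and i40e_atr() consumes the tunnel bit when building the filter descriptor. A condensed sketch of the flow inside i40e_xmit_frame_ring(), matching the call sites in the hunks above:

    u32 tx_flags = 0;

    if (skb->ip_summed == CHECKSUM_PARTIAL) {
            tx_flags |= I40E_TX_FLAGS_CSUM;
            /* may set I40E_TX_FLAGS_VXLAN_TUNNEL for UDP-tunneled skbs */
            i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                tx_ring, &cd_tunneling);
    }

    /* reads the tunnel bit to pick the ATR stat counter index */
    i40e_atr(tx_ring, skb, tx_flags, protocol);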
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4b0b8102cdc3..0dc48dc9ca61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -139,6 +139,7 @@ enum i40e_dyn_idx_t {
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -146,7 +147,6 @@ enum i40e_dyn_idx_t {
struct i40e_tx_buffer {
struct i40e_tx_desc *next_to_watch;
- unsigned long time_stamp;
union {
struct sk_buff *skb;
void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 568e855da0f3..9a5a75b1e2bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1133,6 +1133,7 @@ struct i40e_hw_port_stats {
/* flow director stats */
u64 fd_atr_match;
u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4e9376da0518..23f95cdbdfcc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -980,6 +980,13 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
int pre_existing_vfs = pci_num_vf(pdev);
int err = 0;
+ if (pf->state & __I40E_TESTING) {
+ dev_warn(&pdev->dev,
+ "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+ err = -EPERM;
+ goto err_out;
+ }
+
dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
i40e_free_vfs(pf);
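The guard above closes a race between the ethtool offline self-test and VF creation: the self-test resets the device, so allocating VFs mid-test would operate on half-initialized state. A simplified sketch of the sriov_configure entry point that calls the patched helper, assuming __I40E_TESTING stays set for the duration of the offline test (as the state-bit name suggests):

    static int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
    {
            struct i40e_pf *pf = pci_get_drvdata(pdev);

            if (num_vfs)
                    /* now fails with -EPERM while __I40E_TESTING is set,
                     * instead of racing the reset done by the self-test */
                    return i40e_pci_sriov_enable(pdev, num_vfs);

            i40e_free_vfs(pf);
            return 0;
    }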
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 458fbb421090..f54996f19629 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->vsi->seid,
tx_ring->queue_index,
tx_ring->next_to_use, i);
- dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->tx_bi[i].time_stamp, jiffies);
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@ -1128,9 +1124,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1156,7 +1149,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1271,7 +1263,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
: 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_ring->netdev->last_rx = jiffies;
rx_desc->wb.qword1.status_error_len = 0;
} while (likely(total_rx_packets < budget));
@@ -1352,7 +1343,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
}
/**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* @skb: send buffer
* @tx_ring: ring to send buffer on
* @flags: the tx flags to be set
@@ -1363,9 +1354,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 * Returns an error code indicating the frame should be dropped upon error, and
 * otherwise returns 0 to indicate the flags have been set properly.
**/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring,
- u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
{
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
@@ -1408,16 +1399,14 @@ out:
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
* @hdr_len: ptr to the size of the packet header
* @cd_tunneling: ptr to context descriptor bits
*
 * Returns 0 if no TSO can happen, 1 if TSO is going, or a negative error
**/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol, u8 *hdr_len,
- u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+ u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+ u32 *cd_tunneling)
{
u32 cd_cmd, cd_tso_len, cd_mss;
struct ipv6hdr *ipv6h;
@@ -1468,12 +1457,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
/**
* i40e_tx_enable_csum - Enable Tx checksum offloads
* @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
u32 *td_cmd, u32 *td_offset,
struct i40e_ring *tx_ring,
u32 *cd_tunneling)
@@ -1489,6 +1478,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
default:
return;
@@ -1498,18 +1488,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
this_ipv6_hdr = inner_ipv6_hdr(skb);
this_tcp_hdrlen = inner_tcp_hdrlen(skb);
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
ip_hdr(skb)->check = 0;
} else {
*cd_tunneling |=
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
- if (tx_flags & I40E_TX_FLAGS_TSO)
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
}
@@ -1521,8 +1510,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
if (this_ip_hdr->version == 6) {
- tx_flags &= ~I40E_TX_FLAGS_IPV4;
- tx_flags |= I40E_TX_FLAGS_IPV6;
+ *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
}
@@ -1534,12 +1523,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
}
/* Enable IP checksum offloads */
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
l4_hdr = this_ip_hdr->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
this_ip_hdr->check = 0;
} else {
@@ -1548,7 +1537,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
/* Now set the td_offset for IP header length */
*td_offset = (network_hdr_len >> 2) <<
I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
l4_hdr = this_ipv6_hdr->nexthdr;
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
/* Now set the td_offset for IP header length */
@@ -1672,7 +1661,44 @@ linearize_chk_done:
}
/**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
+ smp_mb();
+
+	/* Check again in case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
* @first: first buffer info buffer to use
@@ -1681,9 +1707,9 @@ linearize_chk_done:
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
**/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
@@ -1789,9 +1815,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->queue_index),
first->bytecount);
- /* set the timestamp */
- first->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -1808,8 +1831,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
+ i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- writel(i, tx_ring->tail);
+ if (!skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index)))
+ writel(i, tx_ring->tail);
return;
@@ -1831,44 +1858,7 @@ dma_error:
}
/**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
- /* Memory barrier before checking head and tail */
- smp_mb();
-
- /* Check again in a case another CPU has just made room available. */
- if (likely(I40E_DESC_UNUSED(tx_ring) < size))
- return -EBUSY;
-
- /* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
- ++tx_ring->tx_stats.restart_queue;
- return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
- if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
- return 0;
- return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
* @skb: send buffer
* @tx_ring: ring to send buffer on
*
@@ -1876,8 +1866,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 * there are not enough descriptors available in this ring, since we need at least
* one descriptor.
**/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
{
unsigned int f;
int count = 0;
@@ -1892,7 +1882,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return 0;
}
@@ -1918,11 +1908,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso;
- if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+ if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
/* prepare the xmit flags */
- if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
/* obtain protocol of skb */
@@ -1937,7 +1927,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ tso = i40e_tso(tx_ring, skb, &hdr_len,
&cd_type_cmd_tso_mss, &cd_tunneling);
if (tso < 0)
@@ -1958,17 +1948,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM;
- i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
}
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
- i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
- td_cmd, td_offset);
-
- i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
return NETDEV_TX_OK;
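Two ordering changes land in i40evf_tx_map() above: the ring-full check moves before the doorbell, and the tail write itself is skipped while the stack signals that more frames are coming (skb->xmit_more), batching MMIO writes across a burst. A condensed sketch of the resulting tail-update policy, as applied at the end of the map function:

    tx_ring->next_to_use = i;

    /* stop the queue *before* ringing, so a full ring is never left
     * running between the doorbell and the stop check */
    i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);

    /* ring the doorbell only for the last skb of a burst, or when the
     * queue just stopped and pending descriptors must be flushed */
    if (!skb->xmit_more ||
        netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
                                               tx_ring->queue_index)))
            writel(i, tx_ring->tail);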
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 1e49bb1fbac1..e7a34f899f2c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -145,7 +146,6 @@ enum i40e_dyn_idx_t {
struct i40e_tx_buffer {
struct i40e_tx_desc *next_to_watch;
- unsigned long time_stamp;
union {
struct sk_buff *skb;
void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index ec9d83a93379..c463ec41579c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats {
/* flow director stats */
u64 fd_atr_match;
u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a0a9b1fcb5e8..f287186192bb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1836,31 +1836,19 @@ void igb_reinit_locked(struct igb_adapter *adapter)
*
* @adapter: adapter struct
**/
-static s32 igb_enable_mas(struct igb_adapter *adapter)
+static void igb_enable_mas(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 connsw;
- s32 ret_val = 0;
-
- connsw = rd32(E1000_CONNSW);
- if (!(hw->phy.media_type == e1000_media_type_copper))
- return ret_val;
+ u32 connsw = rd32(E1000_CONNSW);
/* configure for SerDes media detect */
- if (!(connsw & E1000_CONNSW_SERDESD)) {
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_SERDESD))) {
connsw |= E1000_CONNSW_ENRGSRC;
connsw |= E1000_CONNSW_AUTOSENSE_EN;
wr32(E1000_CONNSW, connsw);
wrfl();
- } else if (connsw & E1000_CONNSW_SERDESD) {
- /* already SerDes, no need to enable anything */
- return ret_val;
- } else {
- netdev_info(adapter->netdev,
- "MAS: Unable to configure feature, disabling..\n");
- adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
}
- return ret_val;
}
void igb_reset(struct igb_adapter *adapter)
@@ -1980,10 +1968,9 @@ void igb_reset(struct igb_adapter *adapter)
adapter->ei.get_invariants(hw);
adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
}
- if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
- if (igb_enable_mas(adapter))
- dev_err(&pdev->dev,
- "Error enabling Media Auto Sense\n");
+ if ((mac->type == e1000_82575) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ igb_enable_mas(adapter);
}
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
@@ -4989,6 +4976,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
+ unsigned short f;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0;
@@ -4999,14 +4987,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
- if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
- unsigned short f;
-
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
- } else {
- count += skb_shinfo(skb)->nr_frags;
- }
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
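The removed branch assumed each frag costs exactly one descriptor whenever NETDEV_FRAG_PAGE_MAX_SIZE fits in IGB_MAX_DATA_PER_TXD; applying TXD_USE_COUNT() unconditionally keeps the worst-case count correct, presumably because skb frags are not guaranteed to be bounded by NETDEV_FRAG_PAGE_MAX_SIZE. A sketch of the count as now computed, with the slack described in the surrounding comment (2 descriptors of head/tail gap plus 1 context descriptor):

    u16 count = TXD_USE_COUNT(skb_headlen(skb));
    unsigned short f;

    /* every frag is rounded up to the descriptors it needs at
     * IGB_MAX_DATA_PER_TXD bytes each; no single-descriptor shortcut */
    for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
            count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

    if (igb_maybe_stop_tx(tx_ring, count + 3))  /* +2 gap, +1 context */
            return NETDEV_TX_BUSY;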
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 636f9e350162..ac3ac2a20386 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -643,6 +643,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
/* Tx fast path data */
int num_tx_queues;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 824a7ab79ab6..65db69b862fb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1225,7 +1225,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
- .check_overtemp = &ixgbe_tn_check_overtemp,
+ .check_overtemp = &ixgbe_tn_check_overtemp,
};
struct ixgbe_info ixgbe_82598_info = {
@@ -1234,4 +1234,5 @@ struct ixgbe_info ixgbe_82598_info = {
.mac_ops = &mac_ops_82598,
.eeprom_ops = &eeprom_ops_82598,
.phy_ops = &phy_ops_82598,
+ .mvals = ixgbe_mvals_8259X,
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index e0c363948bf4..6b87d9634614 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -71,7 +71,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
u32 fwsm, manc, factps;
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
return false;
@@ -79,7 +79,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
if (!(manc & IXGBE_MANC_RCV_TCO_EN))
return false;
- factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
if (factps & IXGBE_FACTPS_MNGCG)
return false;
@@ -510,7 +510,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
/* Check to see if MNG FW could be enabled */
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
!hw->wol_enabled &&
@@ -2378,4 +2378,5 @@ struct ixgbe_info ixgbe_82599_info = {
.eeprom_ops = &eeprom_ops_82599,
.phy_ops = &phy_ops_82599,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_8259X,
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 06d8f3cfa099..4c1c26732b67 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -57,6 +57,11 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+/* Base table for registers values that change by MAC */
+const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(8259X)
+};
+
/**
* ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
* control
@@ -91,6 +96,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
supported = true;
break;
default:
@@ -463,7 +470,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
@@ -681,7 +688,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
bus->lan_id = bus->func;
/* check for a port swap */
- reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
if (reg & IXGBE_FACTPS_LFS)
bus->func ^= 0x1;
}
@@ -799,7 +806,7 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
* Check for EEPROM present first.
* If not present leave as none
*/
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (eec & IXGBE_EEC_PRES) {
eeprom->type = ixgbe_eeprom_spi;
@@ -1283,14 +1290,14 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
return IXGBE_ERR_SWFW_SYNC;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
/* Request EEPROM Access */
eec |= IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (eec & IXGBE_EEC_GNT)
break;
udelay(5);
@@ -1299,7 +1306,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
/* Release if grant not acquired */
if (!(eec & IXGBE_EEC_GNT)) {
eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
hw_dbg(hw, "Could not acquire EEPROM grant\n");
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
@@ -1309,7 +1316,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
/* Setup EEPROM for Read/Write */
/* Clear CS and SK */
eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
return 0;
@@ -1333,7 +1340,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (!(swsm & IXGBE_SWSM_SMBI))
break;
usleep_range(50, 100);
@@ -1353,7 +1360,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (swsm & IXGBE_SWSM_SMBI) {
hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
return IXGBE_ERR_EEPROM;
@@ -1362,16 +1369,16 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
/* Now get the semaphore between SW/FW through the SWESMBI bit */
for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
/* Set the SW EEPROM semaphore bit to request access */
swsm |= IXGBE_SWSM_SWESMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
/* If we set the bit successfully then we got the
* semaphore.
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (swsm & IXGBE_SWSM_SWESMBI)
break;
@@ -1400,11 +1407,11 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
u32 swsm;
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
IXGBE_WRITE_FLUSH(hw);
}
@@ -1454,15 +1461,15 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
u32 eec;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
/* Toggle CS to flush commands */
eec |= IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
eec &= ~IXGBE_EEC_CS;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
}
@@ -1480,7 +1487,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
u32 mask;
u32 i;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
/*
* Mask is used to shift "count" bits of "data" out to the EEPROM
@@ -1501,7 +1508,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
else
eec &= ~IXGBE_EEC_DI;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
@@ -1518,7 +1525,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
/* We leave the "DI" bit set to "0" when we leave this routine. */
eec &= ~IXGBE_EEC_DI;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
}
@@ -1539,7 +1546,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
* the value of the "DO" bit. During this "shifting in" process the
* "DI" bit should always be clear.
*/
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
@@ -1547,7 +1554,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
data = data << 1;
ixgbe_raise_eeprom_clk(hw, &eec);
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eec &= ~(IXGBE_EEC_DI);
if (eec & IXGBE_EEC_DO)
@@ -1571,7 +1578,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
* (setting the SK bit), then delay
*/
*eec = *eec | IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
}
@@ -1588,7 +1595,7 @@ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
* delay
*/
*eec = *eec & ~IXGBE_EEC_SK;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
}
@@ -1601,19 +1608,19 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
u32 eec;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eec |= IXGBE_EEC_CS; /* Pull CS high */
eec &= ~IXGBE_EEC_SK; /* Lower SCK */
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
IXGBE_WRITE_FLUSH(hw);
udelay(1);
/* Stop requesting EEPROM access */
eec &= ~IXGBE_EEC_REQ;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
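All of the IXGBE_EEC/FLA/GRC/SWSM rewrites in this file go through one new mechanism: a per-MAC table of register offsets, indexed by macro-generated IDX constants, replacing the open-coded `(hw->mac.type >= ixgbe_mac_X550 ? a : b)` ternaries. A sketch of how the pieces fit together, using the definitions added in ixgbe_type.h further down:

    /* ixgbe_type.h: per-MAC offsets plus the lookup macros */
    #define IXGBE_CAT(r, m)       IXGBE_##r##_##m
    #define IXGBE_BY_MAC(_hw, r)  ((_hw)->mvals[IXGBE_CAT(r, IDX)])
    #define IXGBE_EEC(_hw)        IXGBE_BY_MAC((_hw), EEC)

    /* ixgbe_common.c: one table per MAC family */
    const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
            IXGBE_MVALS_INIT(8259X)
    };

    /* probe: hw->mvals = ii->mvals; after that, every
     * IXGBE_READ_REG(hw, IXGBE_EEC(hw)) resolves per MAC type. */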
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f21f8a165ec4..ec015fed8fa7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -118,6 +118,8 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
+extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
+
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index eafa9ec802ba..ec7b2324b77b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -207,6 +207,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
switch (adapter->hw.phy.type) {
case ixgbe_phy_tn:
case ixgbe_phy_aq:
+ case ixgbe_phy_x550em_ext_t:
case ixgbe_phy_cu_unknown:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
@@ -470,16 +471,16 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
/* NVM Register */
- regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
- regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
- regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
+ regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
/* Interrupt */
/* don't read EICR because it can clear interrupt causes, instead
@@ -2594,18 +2595,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fdir_filter *input;
union ixgbe_atr_input mask;
+ u8 queue;
int err;
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
return -EOPNOTSUPP;
- /*
- * Don't allow programming if the action is a queue greater than
- * the number of online Rx queues.
+	/* ring_cookie is masked into a set of queues and ixgbe pools, or
+	 * we use the drop index.
*/
- if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
- (fsp->ring_cookie >= adapter->num_rx_queues))
- return -EINVAL;
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+ if (!vf && (ring >= adapter->num_rx_queues))
+ return -EINVAL;
+ else if (vf &&
+ ((vf > adapter->num_vfs) ||
+ ring >= adapter->num_rx_queues_per_pool))
+ return -EINVAL;
+
+ /* Map the ring onto the absolute queue index */
+ if (!vf)
+ queue = adapter->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) *
+ adapter->num_rx_queues_per_pool) + ring;
+ }
/* Don't allow indexes to exist outside of available space */
if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
@@ -2683,10 +2701,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
/* program filters to filter memory */
err = ixgbe_fdir_write_perfect_filter_82599(hw,
- &input->filter, input->sw_idx,
- (input->action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[input->action]->reg_idx);
+ &input->filter, input->sw_idx, queue);
if (err)
goto err_out_w_lock;
@@ -3053,7 +3068,7 @@ static int ixgbe_get_module_info(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 status;
+ s32 status;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -3061,14 +3076,14 @@ static int ixgbe_get_module_info(struct net_device *dev,
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_COMP,
&sff8472_rev);
- if (status != 0)
+ if (status)
return -EIO;
/* addressing mode is not supported */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_SWAP,
&addr_mode);
- if (status != 0)
+ if (status)
return -EIO;
if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
@@ -3095,7 +3110,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u8 databyte = 0xFF;
int i = 0;
@@ -3112,7 +3127,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
else
status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
- if (status != 0)
+ if (status)
return -EIO;
data[i - ee->offset] = databyte;
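ethtool's ring_cookie can now carry a VF index in its upper bits; VF 0 still means the PF. A sketch of the decode-and-map step added in the fdir hunk above, using the ethtool helpers visible there:

    u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
    u8  vf   = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
    u8  queue;

    if (!vf)
            queue = adapter->rx_ring[ring]->reg_idx;   /* PF ring */
    else    /* rings inside a VF pool map to absolute queue indexes */
            queue = (vf - 1) * adapter->num_rx_queues_per_pool + ring;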
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5be12a00e1f4..3bf2f3cfd9f6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -81,6 +81,8 @@ const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2014 Intel Corporation.";
+static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
+
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
[board_82599] = &ixgbe_82599_info,
@@ -131,6 +133,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
/* required last entry */
{0, }
};
@@ -2366,7 +2369,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
* - We may have missed the interrupt so always have to
* check if we got a LSC
*/
- if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
+ if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
!(eicr & IXGBE_EICR_LSC))
return;
@@ -2386,14 +2389,13 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
break;
default:
- if (!(eicr & IXGBE_EICR_GPI_SDP0))
+ if (adapter->hw.mac.type >= ixgbe_mac_X540)
+ return;
+ if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
return;
break;
}
- e_crit(drv,
- "Network adapter has been stopped because it has over heated. "
- "Restart the computer. If the problem persists, "
- "power off the system and replace the adapter\n");
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
adapter->interrupt_event = 0;
}
@@ -2403,15 +2405,17 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
struct ixgbe_hw *hw = &adapter->hw;
if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
- (eicr & IXGBE_EICR_GPI_SDP1)) {
+ (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
e_crit(probe, "Fan has stopped, replace the adapter\n");
/* write to clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
}
}
static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
+ struct ixgbe_hw *hw = &adapter->hw;
+
if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
return;
@@ -2421,7 +2425,8 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
* Need to check link state so complete overtemp check
* on service task
*/
- if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
+ if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
+ (eicr & IXGBE_EICR_LSC)) &&
(!test_bit(__IXGBE_DOWN, &adapter->state))) {
adapter->interrupt_event = eicr;
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
@@ -2437,28 +2442,46 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
return;
}
- e_crit(drv,
- "Network adapter has been stopped because it has over heated. "
- "Restart the computer. If the problem persists, "
- "power off the system and replace the adapter\n");
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+}
+
+static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ if (hw->phy.type == ixgbe_phy_nl)
+ return true;
+ return false;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X550EM_x:
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_fiber_qsfp:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
}
static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
struct ixgbe_hw *hw = &adapter->hw;
- if (eicr & IXGBE_EICR_GPI_SDP2) {
+ if (eicr & IXGBE_EICR_GPI_SDP2(hw)) {
/* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2(hw));
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
ixgbe_service_event_schedule(adapter);
}
}
- if (eicr & IXGBE_EICR_GPI_SDP1) {
+ if (eicr & IXGBE_EICR_GPI_SDP1(hw)) {
/* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
ixgbe_service_event_schedule(adapter);
@@ -2543,6 +2566,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
bool flush)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
/* don't reenable LSC while waiting for link */
@@ -2552,7 +2576,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
- mask |= IXGBE_EIMS_GPI_SDP0;
+ mask |= IXGBE_EIMS_GPI_SDP0(hw);
break;
case ixgbe_mac_X540:
case ixgbe_mac_X550:
@@ -2563,15 +2587,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
break;
}
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
- mask |= IXGBE_EIMS_GPI_SDP1;
+ mask |= IXGBE_EIMS_GPI_SDP1(hw);
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
- mask |= IXGBE_EIMS_GPI_SDP1;
- mask |= IXGBE_EIMS_GPI_SDP2;
+ mask |= IXGBE_EIMS_GPI_SDP1(hw);
+ mask |= IXGBE_EIMS_GPI_SDP2(hw);
/* fall through */
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
+ mask |= IXGBE_EICR_GPI_SDP0_X540;
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_MAILBOX;
break;
@@ -2626,6 +2652,13 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
+ (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
+ adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR,
+ IXGBE_EICR_GPI_SDP0_X540);
+ }
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -4703,32 +4736,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_dfwd(adapter);
}
-static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
-{
- switch (hw->phy.type) {
- case ixgbe_phy_sfp_avago:
- case ixgbe_phy_sfp_ftl:
- case ixgbe_phy_sfp_intel:
- case ixgbe_phy_sfp_unknown:
- case ixgbe_phy_sfp_passive_tyco:
- case ixgbe_phy_sfp_passive_unknown:
- case ixgbe_phy_sfp_active_unknown:
- case ixgbe_phy_sfp_ftl_active:
- case ixgbe_phy_qsfp_passive_unknown:
- case ixgbe_phy_qsfp_active_unknown:
- case ixgbe_phy_qsfp_intel:
- case ixgbe_phy_qsfp_unknown:
- /* ixgbe_phy_none is set when no SFP module is present */
- case ixgbe_phy_none:
- return true;
- case ixgbe_phy_nl:
- if (hw->mac.type == ixgbe_mac_82598EB)
- return true;
- default:
- return false;
- }
-}
-
/**
* ixgbe_sfp_link_config - set up SFP+ link
* @adapter: pointer to private adapter struct
@@ -4757,7 +4764,7 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
u32 speed;
bool autoneg, link_up = false;
- u32 ret = IXGBE_ERR_LINK_SETUP;
+ int ret = IXGBE_ERR_LINK_SETUP;
if (hw->mac.ops.check_link)
ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -4833,7 +4840,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
- gpie |= IXGBE_SDP0_GPIEN;
+ gpie |= IXGBE_SDP0_GPIEN_8259X;
break;
case ixgbe_mac_X540:
gpie |= IXGBE_EIMS_TS;
@@ -4845,11 +4852,11 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
/* Enable fan failure interrupt */
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
- gpie |= IXGBE_SDP1_GPIEN;
+ gpie |= IXGBE_SDP1_GPIEN(hw);
if (hw->mac.type == ixgbe_mac_82599EB) {
- gpie |= IXGBE_SDP1_GPIEN;
- gpie |= IXGBE_SDP2_GPIEN;
+ gpie |= IXGBE_SDP1_GPIEN_8259X;
+ gpie |= IXGBE_SDP2_GPIEN_8259X;
}
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -4873,6 +4880,9 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
+ if (hw->phy.ops.set_phy_power)
+ hw->phy.ops.set_phy_power(hw, true);
+
smp_mb__before_atomic();
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
@@ -4992,6 +5002,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_reset(adapter);
+
+ if (hw->phy.ops.set_phy_power) {
+ if (!netif_running(adapter->netdev) && !adapter->wol)
+ hw->phy.ops.set_phy_power(hw, false);
+ else
+ hw->phy.ops.set_phy_power(hw, true);
+ }
}
/**
@@ -5260,7 +5277,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X540:
- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
@@ -5672,6 +5689,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
static int ixgbe_open(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int err, queues;
/* disallow open during test */
@@ -5729,6 +5747,8 @@ err_set_queues:
ixgbe_free_irq(adapter);
err_req_irq:
ixgbe_free_all_rx_resources(adapter);
+ if (hw->phy.ops.set_phy_power && !adapter->wol)
+ hw->phy.ops.set_phy_power(&adapter->hw, false);
err_setup_rx:
ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
@@ -5889,6 +5909,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
}
*enable_wake = !!wufc;
+ if (hw->phy.ops.set_phy_power && !*enable_wake)
+ hw->phy.ops.set_phy_power(hw, false);
ixgbe_release_hw_control(adapter);
@@ -6718,6 +6740,26 @@ static void ixgbe_service_timer(unsigned long data)
ixgbe_service_event_schedule(adapter);
}
+static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status;
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;
+
+ if (!hw->phy.ops.handle_lasi)
+ return;
+
+ status = hw->phy.ops.handle_lasi(&adapter->hw);
+ if (status != IXGBE_ERR_OVERTEMP)
+ return;
+
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+}
+
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
@@ -6759,6 +6801,7 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
ixgbe_reset_subtask(adapter);
+ ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
ixgbe_sfp_link_config_subtask(adapter);
ixgbe_check_overtemp_subtask(adapter);
@@ -8022,7 +8065,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
- u32 status;
+ int status;
__u16 mode;
if (nla_type(attr) != IFLA_BRIDGE_MODE)
@@ -8291,6 +8334,10 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
+ case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
/* check eeprom to see if enabled wol */
if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
@@ -8431,10 +8478,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup hw api */
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
+ hw->mvals = ii->mvals;
/* EEPROM */
memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (ixgbe_removed(hw->hw_addr)) {
err = -EIO;
goto err_ioremap;
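Three call sites above (ixgbe_up_complete, ixgbe_reset, __ixgbe_shutdown) share one policy for the new optional set_phy_power op: keep the copper PHY powered whenever the netdev runs or Wake-on-LAN needs it, and power it down otherwise. A condensed form of that policy; keep_up is a local introduced for the sketch, the driver open-codes the condition at each site:

    if (hw->phy.ops.set_phy_power) {
            bool keep_up = netif_running(adapter->netdev) || adapter->wol;

            hw->phy.ops.set_phy_power(hw, keep_up);
    }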
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 8a2be444113d..526a20bf7488 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -317,14 +317,14 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
**/
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
{
- u32 status;
+ s32 status;
u16 phy_id_high = 0;
u16 phy_id_low = 0;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
&phy_id_high);
- if (status == 0) {
+ if (!status) {
hw->phy.id = (u32)(phy_id_high << 16);
status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
&phy_id_low);
@@ -347,6 +347,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
+ case X550_PHY_ID:
case X540_PHY_ID:
phy_type = ixgbe_phy_aq;
break;
@@ -356,6 +357,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case ATH_PHY_ID:
phy_type = ixgbe_phy_nl;
break;
+ case X557_PHY_ID:
+ phy_type = ixgbe_phy_x550em_ext_t;
+ break;
default:
phy_type = ixgbe_phy_unknown;
break;
@@ -1348,6 +1352,9 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
return IXGBE_ERR_SFP_NOT_PRESENT;
}
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
&identifier);
@@ -1361,9 +1368,6 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
hw->phy.id = identifier;
- /* LAN ID is needed for sfp_type determination */
- hw->mac.ops.set_lan_id(hw);
-
status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
&comp_codes_10g);
@@ -1793,7 +1797,7 @@ fail:
**/
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
/* Start condition must begin with data and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 1);
@@ -1822,7 +1826,7 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw)
**/
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
/* Stop condition must begin with data low and clock high */
ixgbe_set_i2c_data(hw, &i2cctl, 0);
@@ -1880,9 +1884,9 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
}
/* Release SDA line (set high) */
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
- i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl);
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ i2cctl |= IXGBE_I2C_DATA_OUT(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
IXGBE_WRITE_FLUSH(hw);
return status;
@@ -1898,7 +1902,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
s32 status = 0;
u32 i = 0;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 timeout = 10;
bool ack = true;
@@ -1911,7 +1915,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
/* Poll for ACK. Note that ACK in I2C spec is
 * a transition from 1 to 0 */
for (i = 0; i < timeout; i++) {
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
ack = ixgbe_get_i2c_data(hw, &i2cctl);
udelay(1);
@@ -1941,14 +1945,14 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
**/
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
ixgbe_raise_i2c_clk(hw, &i2cctl);
/* Minimum high period of clock is 4us */
udelay(IXGBE_I2C_T_HIGH);
- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
*data = ixgbe_get_i2c_data(hw, &i2cctl);
ixgbe_lower_i2c_clk(hw, &i2cctl);
@@ -1969,7 +1973,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
s32 status;
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == 0) {
@@ -2005,14 +2009,14 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
u32 i2cctl_r = 0;
for (i = 0; i < timeout; i++) {
- *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ *i2cctl |= IXGBE_I2C_CLK_OUT(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
udelay(IXGBE_I2C_T_RISE);
- i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
- if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw))
+ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ if (i2cctl_r & IXGBE_I2C_CLK_IN(hw))
break;
}
}
@@ -2027,9 +2031,9 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
- *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw);
+ *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL fall time (300ns) */
@@ -2047,18 +2051,18 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
if (data)
- *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ *i2cctl |= IXGBE_I2C_DATA_OUT(hw);
else
- *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw);
+ *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl);
+ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
/* Verify data was set correctly */
- *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
return IXGBE_ERR_I2C;
@@ -2076,7 +2080,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
**/
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
{
- if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw))
+ if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
return true;
return false;
}
@@ -2090,7 +2094,7 @@ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
**/
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw));
+ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 i;
ixgbe_i2c_start(hw);
@@ -2137,3 +2141,36 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
return IXGBE_ERR_OVERTEMP;
}
+
+/** ixgbe_set_copper_phy_power - Control power for copper phy
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ **/
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+{
+ u32 status;
+ u16 reg;
+
+ /* Bail if we don't have copper phy */
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return 0;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
+
+ if (on) {
+ reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ } else {
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+ reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
+ }
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ return status;
+}
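ixgbe_set_copper_phy_power() toggles the standard MDIO low-power bit in the vendor-specific control register, and declines to power down while a PHY reset is blocked (manageability firmware owns the PHY). It is declared in ixgbe_phy.h below so MAC-specific PHY ops tables can adopt it; a sketch of that wiring, with the table name hypothetical and the set_phy_power field inferred from the hw->phy.ops.set_phy_power calls above:

    static struct ixgbe_phy_operations phy_ops_X540 = {
            /* existing ops unchanged */
            .set_phy_power = &ixgbe_set_copper_phy_power,
    };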
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 434643881287..e45988c4dad5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -145,6 +145,7 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index dd6ba5916dfe..b6f424f3b1a8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -91,14 +91,24 @@
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_CAT(r, m) IXGBE_##r##_##m
+
+#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, IDX)])
+
/* General Registers */
#define IXGBE_CTRL 0x00000
#define IXGBE_STATUS 0x00008
#define IXGBE_CTRL_EXT 0x00018
#define IXGBE_ESDP 0x00020
#define IXGBE_EODSDP 0x00028
-#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- 0x15F5C : 0x00028))
+
+#define IXGBE_I2CCTL_8259X 0x00028
+#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_8259X
+#define IXGBE_I2CCTL_X550 0x15F5C
+#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550
+#define IXGBE_I2CCTL(_hw) IXGBE_BY_MAC((_hw), I2CCTL)
+
#define IXGBE_LEDCTL 0x00200
#define IXGBE_FRTIMER 0x00048
#define IXGBE_TCPTIMER 0x0004C
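
The IXGBE_CAT/IXGBE_BY_MAC pair above replaces the old runtime ternaries on mac.type with token pasting plus a per-MAC lookup table. A self-contained sketch of the same pattern, reduced to one register (all names below are illustrative, not taken from the driver):

#include <stdio.h>

#define CAT(r, m)	r##_##m
#define BY_MAC(hw, r)	((hw)->mvals[CAT(r, IDX)])

#define I2CCTL_8259X	0x00028
#define I2CCTL_X550	0x15F5C

enum { I2CCTL_IDX };				/* one slot per register */

struct hw { const unsigned int *mvals; };	/* per-MAC value table */

static const unsigned int mvals_8259x[] = { I2CCTL_8259X };
static const unsigned int mvals_x550[]  = { I2CCTL_X550 };

int main(void)
{
	struct hw hw = { .mvals = mvals_8259x };

	/* BY_MAC(&hw, I2CCTL) expands to (&hw)->mvals[I2CCTL_IDX] */
	printf("82599 I2CCTL: 0x%05X\n", BY_MAC(&hw, I2CCTL));

	hw.mvals = mvals_x550;
	printf("X550  I2CCTL: 0x%05X\n", BY_MAC(&hw, I2CCTL));
	return 0;
}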
@@ -106,17 +116,39 @@
#define IXGBE_EXVET 0x05078
/* NVM Registers */
-#define IXGBE_EEC 0x10010
+#define IXGBE_EEC_8259X 0x10010
+#define IXGBE_EEC_X540 IXGBE_EEC_8259X
+#define IXGBE_EEC_X550 IXGBE_EEC_8259X
+#define IXGBE_EEC_X550EM_x IXGBE_EEC_8259X
+#define IXGBE_EEC_X550EM_a 0x15FF8
+#define IXGBE_EEC(_hw) IXGBE_BY_MAC((_hw), EEC)
#define IXGBE_EERD 0x10014
#define IXGBE_EEWR 0x10018
-#define IXGBE_FLA 0x1001C
+#define IXGBE_FLA_8259X 0x1001C
+#define IXGBE_FLA_X540 IXGBE_FLA_8259X
+#define IXGBE_FLA_X550 IXGBE_FLA_8259X
+#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X
+#define IXGBE_FLA_X550EM_a 0x15F6C
+#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA)
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
#define IXGBE_FLMNGCTL 0x10118
#define IXGBE_FLMNGDATA 0x1011C
#define IXGBE_FLMNGCNT 0x10120
#define IXGBE_FLOP 0x1013C
-#define IXGBE_GRC 0x10200
+#define IXGBE_GRC_8259X 0x10200
+#define IXGBE_GRC_X540 IXGBE_GRC_8259X
+#define IXGBE_GRC_X550 IXGBE_GRC_8259X
+#define IXGBE_GRC_X550EM_x IXGBE_GRC_8259X
+#define IXGBE_GRC_X550EM_a 0x15F64
+#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC)
+
+#define IXGBE_SRAMREL_8259X 0x10210
+#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X
+#define IXGBE_SRAMREL_X550EM_a 0x15F6C
+#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL)
/* General Receive Control */
#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
@@ -126,14 +158,55 @@
#define IXGBE_VPDDIAG1 0x10208
/* I2CCTL Bit Masks */
-#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00004000 : 0x00000001)
-#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00000200 : 0x00000002)
-#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00001000 : 0x00000004)
-#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \
- 0x00000400 : 0x00000008)
+#define IXGBE_I2C_CLK_IN_8259X 0x00000001
+#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN_8259X
+#define IXGBE_I2C_CLK_IN_X550 0x00004000
+#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550
+#define IXGBE_I2C_CLK_IN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN)
+
+#define IXGBE_I2C_CLK_OUT_8259X 0x00000002
+#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT_8259X
+#define IXGBE_I2C_CLK_OUT_X550 0x00000200
+#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550
+#define IXGBE_I2C_CLK_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT)
+
+#define IXGBE_I2C_DATA_IN_8259X 0x00000004
+#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN_8259X
+#define IXGBE_I2C_DATA_IN_X550 0x00001000
+#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550
+#define IXGBE_I2C_DATA_IN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN)
+
+#define IXGBE_I2C_DATA_OUT_8259X 0x00000008
+#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT_8259X
+#define IXGBE_I2C_DATA_OUT_X550 0x00000400
+#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550
+#define IXGBE_I2C_DATA_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT)
+
+#define IXGBE_I2C_DATA_OE_N_EN_8259X 0
+#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN_8259X
+#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550
+#define IXGBE_I2C_DATA_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN)
+
+#define IXGBE_I2C_BB_EN_8259X 0
+#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN_8259X
+#define IXGBE_I2C_BB_EN_X550 0x00000100
+#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550
+#define IXGBE_I2C_BB_EN(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN)
+
+#define IXGBE_I2C_CLK_OE_N_EN_8259X 0
+#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN_8259X
+#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550
+#define IXGBE_I2C_CLK_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN)
+
#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
@@ -835,15 +908,36 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_GSCN_1 0x11024
#define IXGBE_GSCN_2 0x11028
#define IXGBE_GSCN_3 0x1102C
-#define IXGBE_FACTPS 0x10150
+#define IXGBE_FACTPS_8259X 0x10150
+#define IXGBE_FACTPS_X540 IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550 IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS_8259X
+#define IXGBE_FACTPS_X550EM_a 0x15FEC
+#define IXGBE_FACTPS(_hw) IXGBE_BY_MAC((_hw), FACTPS)
+
#define IXGBE_PCIEANACTL 0x11040
-#define IXGBE_SWSM 0x10140
-#define IXGBE_FWSM 0x10148
+#define IXGBE_SWSM_8259X 0x10140
+#define IXGBE_SWSM_X540 IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550 IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550EM_x IXGBE_SWSM_8259X
+#define IXGBE_SWSM_X550EM_a 0x15F70
+#define IXGBE_SWSM(_hw) IXGBE_BY_MAC((_hw), SWSM)
+#define IXGBE_FWSM_8259X 0x10148
+#define IXGBE_FWSM_X540 IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550 IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550EM_x IXGBE_FWSM_8259X
+#define IXGBE_FWSM_X550EM_a 0x15F74
+#define IXGBE_FWSM(_hw) IXGBE_BY_MAC((_hw), FWSM)
#define IXGBE_GSSR 0x10160
#define IXGBE_MREVID 0x11064
#define IXGBE_DCA_ID 0x11070
#define IXGBE_DCA_CTRL 0x11074
-#define IXGBE_SWFW_SYNC IXGBE_GSSR
+#define IXGBE_SWFW_SYNC_8259X IXGBE_GSSR
+#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC_8259X
+#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78
+#define IXGBE_SWFW_SYNC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC)
/* PCIe registers 82599-specific */
#define IXGBE_GCR_EXT 0x11050
@@ -855,14 +949,21 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_PHYDAT_82599 0x11044
#define IXGBE_PHYCTL_82599 0x11048
#define IXGBE_PBACLR_82599 0x11068
-#define IXGBE_CIAA_82599 0x11088
-#define IXGBE_CIAD_82599 0x1108C
-#define IXGBE_CIAA_X550 0x11508
-#define IXGBE_CIAD_X550 0x11510
-#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- IXGBE_CIAA_X550 : IXGBE_CIAA_82599))
-#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \
- IXGBE_CIAD_X550 : IXGBE_CIAD_82599))
+
+#define IXGBE_CIAA_8259X 0x11088
+#define IXGBE_CIAA_X540 IXGBE_CIAA_8259X
+#define IXGBE_CIAA_X550 0x11508
+#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550
+#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550
+#define IXGBE_CIAA(_hw) IXGBE_BY_MAC((_hw), CIAA)
+
+#define IXGBE_CIAD_8259X 0x1108C
+#define IXGBE_CIAD_X540 IXGBE_CIAD_8259X
+#define IXGBE_CIAD_X550 0x11510
+#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550
+#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550
+#define IXGBE_CIAD(_hw) IXGBE_BY_MAC((_hw), CIAD)
+
#define IXGBE_PICAUSE 0x110B0
#define IXGBE_PIENA 0x110B8
#define IXGBE_CDQ_MBR_82599 0x110B4
@@ -1204,18 +1305,37 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */
#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */
#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
+#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */
#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */
#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */
#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */
#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */
+#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */
+#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide flag */
+#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */
+#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */
+/* autoneg vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000
+#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */
+#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */
+#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */
+#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */
+#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */
#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */
#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */
@@ -1233,6 +1353,8 @@ struct ixgbe_thermal_sensor_data {
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
+#define X550_PHY_ID 0x01540220
+#define X557_PHY_ID 0x01540240
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
#define AQ_FW_REV 0x20
@@ -1253,9 +1375,25 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_CONTROL_SOL_NL 0x0000
/* General purpose Interrupt Enable */
-#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
-#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
-#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_8259X 0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN_8259X 0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN_8259X 0x00000004 /* SDP2 */
+#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */
+#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */
+#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */
+#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540
+#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540
+#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540
+#define IXGBE_SDP0_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN)
+#define IXGBE_SDP1_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN)
+#define IXGBE_SDP2_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN)
+
#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
@@ -1417,9 +1555,25 @@ enum {
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
-#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
-#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
-#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_8259X 0x01000000 /* Gen Purpose INT on SDP0 */
+#define IXGBE_EICR_GPI_SDP1_8259X 0x02000000 /* Gen Purpose INT on SDP1 */
+#define IXGBE_EICR_GPI_SDP2_8259X 0x04000000 /* Gen Purpose INT on SDP2 */
+#define IXGBE_EICR_GPI_SDP0_X540 0x02000000
+#define IXGBE_EICR_GPI_SDP1_X540 0x04000000
+#define IXGBE_EICR_GPI_SDP2_X540 0x08000000
+#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540
+#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540
+#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540
+#define IXGBE_EICR_GPI_SDP0(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0)
+#define IXGBE_EICR_GPI_SDP1(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1)
+#define IXGBE_EICR_GPI_SDP2(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2)
+
#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */
#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
@@ -1435,9 +1589,9 @@ enum {
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EICS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EICS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw)
#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */
#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
@@ -1454,9 +1608,9 @@ enum {
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EIMS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EIMS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw)
#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */
#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
@@ -1472,9 +1626,9 @@ enum {
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
-#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
-#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw)
+#define IXGBE_EIMC_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw)
+#define IXGBE_EIMC_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw)
#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */
#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
@@ -2741,6 +2895,37 @@ union ixgbe_atr_hash_dword {
__be32 dword;
};
+#define IXGBE_MVALS_INIT(m) \
+ IXGBE_CAT(EEC, m), \
+ IXGBE_CAT(FLA, m), \
+ IXGBE_CAT(GRC, m), \
+ IXGBE_CAT(SRAMREL, m), \
+ IXGBE_CAT(FACTPS, m), \
+ IXGBE_CAT(SWSM, m), \
+ IXGBE_CAT(SWFW_SYNC, m), \
+ IXGBE_CAT(FWSM, m), \
+ IXGBE_CAT(SDP0_GPIEN, m), \
+ IXGBE_CAT(SDP1_GPIEN, m), \
+ IXGBE_CAT(SDP2_GPIEN, m), \
+ IXGBE_CAT(EICR_GPI_SDP0, m), \
+ IXGBE_CAT(EICR_GPI_SDP1, m), \
+ IXGBE_CAT(EICR_GPI_SDP2, m), \
+ IXGBE_CAT(CIAA, m), \
+ IXGBE_CAT(CIAD, m), \
+ IXGBE_CAT(I2C_CLK_IN, m), \
+ IXGBE_CAT(I2C_CLK_OUT, m), \
+ IXGBE_CAT(I2C_DATA_IN, m), \
+ IXGBE_CAT(I2C_DATA_OUT, m), \
+ IXGBE_CAT(I2C_DATA_OE_N_EN, m), \
+ IXGBE_CAT(I2C_BB_EN, m), \
+ IXGBE_CAT(I2C_CLK_OE_N_EN, m), \
+ IXGBE_CAT(I2CCTL, m)
+
+enum ixgbe_mvals {
+ IXGBE_MVALS_INIT(IDX),
+ IXGBE_MVALS_IDX_LIMIT
+};
+
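
IXGBE_MVALS_INIT is the X-macro half of the scheme: expanding the one list with IDX produces the enum of table slots, and expanding it with a MAC suffix produces the matching value table, so slot order and value order cannot drift apart. A minimal sketch under the same assumptions as the previous example:

#define CAT(r, m)	r##_##m

#define EEC_X540	0x10010
#define FLA_X540	0x1001C

#define MVALS_INIT(m) \
	CAT(EEC, m), \
	CAT(FLA, m)

/* Expansion 1: MVALS_INIT(IDX) yields EEC_IDX, FLA_IDX -- the indices. */
enum mvals {
	MVALS_INIT(IDX),
	MVALS_IDX_LIMIT
};

/* Expansion 2: MVALS_INIT(X540) yields EEC_X540, FLA_X540 -- the values.
 * Both expansions come from the same list, so the enum and every table
 * stay in the same order by construction: mvals_x540[EEC_IDX] == 0x10010.
 */
static const unsigned int mvals_x540[MVALS_IDX_LIMIT] = {
	MVALS_INIT(X540)
};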
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
@@ -3112,6 +3297,8 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
s32 (*check_overtemp)(struct ixgbe_hw *);
+ s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+ s32 (*handle_lasi)(struct ixgbe_hw *hw);
};
struct ixgbe_eeprom_info {
@@ -3173,6 +3360,7 @@ struct ixgbe_phy_info {
bool multispeed_fiber;
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
+ u32 nw_mng_if_sel;
};
#include "ixgbe_mbx.h"
@@ -3216,6 +3404,7 @@ struct ixgbe_hw {
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
struct ixgbe_mbx_info mbx;
+ const u32 *mvals;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -3234,6 +3423,7 @@ struct ixgbe_info {
struct ixgbe_eeprom_operations *eeprom_ops;
struct ixgbe_phy_operations *phy_ops;
struct ixgbe_mbx_operations *mbx_ops;
+ const u32 *mvals;
};
@@ -3339,4 +3529,6 @@ struct ixgbe_info {
#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
+#define IXGBE_NW_MNG_IF_SEL 0x00011178
+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f5f948d08b43..032a5870abd1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -202,7 +202,7 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
@@ -504,8 +504,8 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
return status;
}
- flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)) | IXGBE_EEC_FLUP;
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup);
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == 0)
@@ -514,11 +514,11 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
hw_dbg(hw, "Flash update time out\n");
if (hw->revision_id == 0) {
- flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+ flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (flup & IXGBE_EEC_SEC1VAL) {
flup |= IXGBE_EEC_FLUP;
- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+ IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup);
}
status = ixgbe_poll_flash_update_done_X540(hw);
@@ -544,7 +544,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
u32 reg;
for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
- reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+ reg = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (reg & IXGBE_EEC_FLUDONE)
return 0;
udelay(5);
@@ -580,10 +580,10 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
break;
} else {
@@ -605,13 +605,13 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* corresponding FW/HW bits in the SW_FW_SYNC register.
*/
if (i >= timeout) {
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (swfw_sync & (fwmask | hwmask)) {
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
swfw_sync |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
}
}
@@ -635,9 +635,9 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
ixgbe_get_swfw_sync_semaphore(hw);
- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
swfw_sync &= ~swmask;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
usleep_range(5000, 10000);
@@ -660,7 +660,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
if (!(swsm & IXGBE_SWSM_SMBI))
break;
usleep_range(50, 100);
@@ -674,7 +674,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Now get the semaphore between SW/FW through the REGSMP bit */
for (i = 0; i < timeout; i++) {
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
if (!(swsm & IXGBE_SWFW_REGSMP))
return 0;
@@ -696,13 +696,13 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
- swsm &= ~IXGBE_SWSM_SMBI;
- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
-
- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
swsm &= ~IXGBE_SWFW_REGSMP;
- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swsm);
+
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
+ swsm &= ~IXGBE_SWSM_SMBI;
+ IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
IXGBE_WRITE_FLUSH(hw);
}
@@ -850,9 +850,14 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
+ .set_phy_power = &ixgbe_set_copper_phy_power,
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
};
+static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X540)
+};
+
struct ixgbe_info ixgbe_X540_info = {
.mac = ixgbe_mac_X540,
.get_invariants = &ixgbe_get_invariants_X540,
@@ -860,4 +865,5 @@ struct ixgbe_info ixgbe_X540_info = {
.eeprom_ops = &eeprom_ops_X540,
.phy_ops = &phy_ops_X540,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_X540,
};
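
How the table pointer reaches the live hw structure is not visible in this excerpt; the assignment is expected to sit in the probe path in ixgbe_main.c. A hedged sketch of that wiring (helper name hypothetical):

static void example_wire_mvals(struct ixgbe_hw *hw,
			       const struct ixgbe_info *ii)
{
	hw->mvals = ii->mvals;	/* per-MAC register/bit value table */

	/* From here on, IXGBE_EEC(hw), IXGBE_SWSM(hw), etc. index into
	 * hw->mvals instead of testing hw->mac.type at run time.
	 */
}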
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index cf5cf819a6b8..7581da13e92a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -26,6 +26,22 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
+/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
+ * @hw: pointer to hardware structure
+ **/
+static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
+{
+ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+ if (hw->bus.lan_id) {
+ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
+ esdp |= IXGBE_ESDP_SDP1_DIR;
+ }
+ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+}
+
/** ixgbe_identify_phy_x550em - Get PHY type based on device id
* @hw: pointer to hardware structure
*
@@ -33,18 +49,11 @@
*/
static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
- u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_SFP:
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
- if (hw->bus.lan_id) {
- esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
- esdp |= IXGBE_ESDP_SDP1_DIR;
- }
- esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ ixgbe_setup_mux_ctl(hw);
return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_KX4:
@@ -90,7 +99,7 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
- eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
@@ -103,6 +112,39 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbe_iosf_wait - Wait for IOSF command completion
+ * @hw: pointer to hardware structure
+ * @ctrl: pointer to location to receive final IOSF control value
+ *
+ * Return: failing status on timeout
+ *
+ * Note: ctrl can be NULL if the IOSF control register value is not needed
+ */
+static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+{
+ u32 i, command;
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The SB IOSF BUSY bit will clear when the operation is
+ * complete.
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+ if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
+ break;
+ usleep_range(10, 20);
+ }
+ if (ctrl)
+ *ctrl = command;
+ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+ hw_dbg(hw, "IOSF wait timed out\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return 0;
+}
+
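A hedged sketch of the two call styles the helper supports: a NULL @ctrl just waits for the bus to go idle, while a non-NULL @ctrl also captures the final control word for error decoding. The wrapper below is hypothetical; the register and error names come from this file:

static s32 example_iosf_transaction(struct ixgbe_hw *hw, u32 command)
{
	u32 ctrl;
	s32 ret;

	ret = ixgbe_iosf_wait(hw, NULL);	/* bus idle, value unused */
	if (ret)
		return ret;

	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);

	ret = ixgbe_iosf_wait(hw, &ctrl);	/* capture final control word */
	if (ctrl & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK)
		return IXGBE_ERR_PHY;
	return ret;
}
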
/** ixgbe_read_iosf_sb_reg_x550 - Reads a value from the specified register of
 * the IOSF device
* @hw: pointer to hardware structure
@@ -113,7 +155,17 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data)
{
- u32 i, command, error;
+ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+ u32 command, error;
+ s32 ret;
+
+ ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
+ if (ret)
+ return ret;
+
+ ret = ixgbe_iosf_wait(hw, NULL);
+ if (ret)
+ goto out;
command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
(device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
@@ -121,17 +173,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
/* Write IOSF control register */
IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
- /* Check every 10 usec to see if the address cycle completed.
- * The SB IOSF BUSY bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- usleep_range(10, 20);
-
- command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
- if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
- break;
- }
+ ret = ixgbe_iosf_wait(hw, &command);
if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
@@ -140,14 +182,12 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
return IXGBE_ERR_PHY;
}
- if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
- hw_dbg(hw, "Read timed out\n");
- return IXGBE_ERR_PHY;
- }
-
- *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+ if (!ret)
+ *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
- return 0;
+out:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return ret;
}
/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
@@ -673,6 +713,249 @@ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
return status;
}
+/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to the specified register of
+ * the IOSF device
+ *
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u32 data)
+{
+ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+ u32 command, error;
+ s32 ret;
+
+ ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
+ if (ret)
+ return ret;
+
+ ret = ixgbe_iosf_wait(hw, NULL);
+ if (ret)
+ goto out;
+
+ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
+ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+
+ /* Write IOSF control register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+
+ /* Write IOSF data register */
+ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+
+ ret = ixgbe_iosf_wait(hw, &command);
+
+ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
+ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
+ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
+ hw_dbg(hw, "Failed to write, error %x\n", error);
+ ret = IXGBE_ERR_PHY;
+ goto out;
+ }
+
+out:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return ret;
+}
+
+/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ *
+ * Configures the integrated KR PHY to use iXFI mode. Used to connect an
+ * internal and external PHY at a specific speed, without autonegotiation.
+ **/
+static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+ s32 status;
+ u32 reg_val;
+
+ /* Disable AN and force speed to 10G Serial. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+
+ /* Select forced link speed for internal PHY. */
+ switch (*speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ break;
+ default:
+ /* Other link speeds are not supported by internal KR PHY. */
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Disable training protocol FSM. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Disable Flex from training TXFFE. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
+ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Enable override for coefficients. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
+ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ if (status)
+ return status;
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_read_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status)
+ return status;
+
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = ixgbe_write_iosf_sb_reg_x550(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ return status;
+}
+
+/**
+ * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Setup internal/external PHY link speed based on link speed, then set
+ * external PHY auto advertised link speed.
+ *
+ * Returns error status for any failure
+ **/
+static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ s32 status;
+ ixgbe_link_speed force_speed;
+
+ /* Setup internal/external PHY link speed to iXFI (10G), unless
+ * only 1G is auto advertised, in which case set up the KX link.
+ */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ force_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ force_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* If internal link mode is XFI, then set up the iXFI internal link. */
+ if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
+
+ if (status)
+ return status;
+ }
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/** ixgbe_check_link_t_X550em - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Check that both the MAC and X557 external PHY have link.
+ **/
+static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool link_up_wait_to_complete)
+{
+ s32 status;
+ u16 autoneg_status;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return IXGBE_ERR_CONFIG;
+
+ status = ixgbe_check_mac_link_generic(hw, speed, link_up,
+ link_up_wait_to_complete);
+
+ /* If the link check fails or the MAC link is not up, then return */
+ if (status || !(*link_up))
+ return status;
+
+ /* MAC link is up, so check external PHY link.
+ * The link status bit is latched, so read it twice back to back
+ * to get the current status.
+ */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (status)
+ return status;
+
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (status)
+ return status;
+
+ /* If external PHY link is not up, then indicate link not up */
+ if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
+ *link_up = false;
+
+ return 0;
+}
+
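ixgbe_check_link_t_X550em above and ixgbe_ext_phy_t_x550em_get_link later in this patch both rely on the same MDIO idiom: the auto-negotiation link status bit is latched, so the first read returns the sticky value and only a second back-to-back read reflects the live state. Distilled into a hypothetical helper using this file's names:

static s32 example_live_an_link_status(struct ixgbe_hw *hw, bool *link_up)
{
	u16 stat;
	s32 ret;

	/* First read returns the latched value and clears it ... */
	ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
				   IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &stat);
	if (ret)
		return ret;

	/* ... second read reflects the current link state. */
	ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
				   IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &stat);
	if (ret)
		return ret;

	*link_up = !!(stat & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
	return 0;
}
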
/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
* @hw: pointer to hardware structure
**/
@@ -680,13 +963,21 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
- /* CS4227 does not support autoneg, so disable the laser control
- * functions for SFP+ fiber
- */
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
+ switch (mac->ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ /* CS4227 does not support autoneg, so disable the laser control
+ * functions for SFP+ fiber
+ */
mac->ops.disable_tx_laser = NULL;
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
+ break;
+ case ixgbe_media_type_copper:
+ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ mac->ops.check_link = ixgbe_check_link_t_X550em;
+ break;
+ default:
+ break;
}
}
@@ -778,169 +1069,221 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
return 0;
}
-/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the
- * IOSF device
+/**
+ * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
+ * @hw: pointer to hardware structure
+ * @lsc: pointer to boolean flag which indicates whether external Base T
+ * PHY interrupt is lsc
*
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @device_type: 3 bit device type
- * @data: Data to write to the register
+ * Determine if external Base T PHY interrupt cause is high temperature
+ * failure alarm or link status change.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
**/
-static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 data)
+static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
{
- u32 i, command, error;
+ s32 status;
+ u16 reg;
- command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
- (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
+ *lsc = false;
- /* Write IOSF control register */
- IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
+ /* Vendor alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
- /* Write IOSF data register */
- IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
+ if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
+ return status;
- /* Check every 10 usec to see if the address cycle completed.
- * The SB IOSF BUSY bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- usleep_range(10, 20);
+ /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
- command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
- if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
- break;
- }
+ if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
+ return status;
- if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
- error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
- IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
- hw_dbg(hw, "Failed to write, error %x\n", error);
- return IXGBE_ERR_PHY;
- }
+ /* High temperature failure alarm triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
- if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
- hw_dbg(hw, "Write timed out\n");
- return IXGBE_ERR_PHY;
+ if (status)
+ return status;
+
+ /* If high temperature failure, then return over temp error and exit */
+ if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
+ /* power down the PHY in case the PHY FW didn't already */
+ ixgbe_set_copper_phy_power(hw, false);
+ return IXGBE_ERR_OVERTEMP;
}
+ /* Vendor alarm 2 triggered */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
+ return status;
+
+ /* link connect/disconnect event occurred */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
+
+ if (status)
+ return status;
+
+ /* Indicate LSC */
+ if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
+ *lsc = true;
+
return 0;
}
-/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
- * @hw: pointer to hardware structure
- * @speed: the link speed to force
+/**
+ * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
+ * @hw: pointer to hardware structure
*
- * Configures the integrated KR PHY to use iXFI mode. Used to connect an
- * internal and external PHY at a specific speed, without autonegotiation.
+ * Enable link status change and temperature failure alarm for the external
+ * Base T PHY
+ *
+ * Returns PHY access status
**/
-static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
{
- s32 status;
- u32 reg_val;
+ s32 status;
+ u16 reg;
+ bool lsc;
- /* Disable AN and force speed to 10G Serial. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ /* Clear interrupt flags */
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
+
+ /* Enable link status change alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
if (status)
return status;
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
- /* Select forced link speed for internal PHY. */
- switch (*speed) {
- case IXGBE_LINK_SPEED_10GB_FULL:
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
- break;
- case IXGBE_LINK_SPEED_1GB_FULL:
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
- break;
- default:
- /* Other link speeds are not supported by internal KR PHY. */
- return IXGBE_ERR_LINK_SETUP;
- }
-
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
if (status)
return status;
- /* Disable training protocol FSM. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ /* Enable high temperature failure alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
if (status)
return status;
- reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- if (status)
- return status;
+ reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN;
- /* Disable Flex from training TXFFE. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
if (status)
return status;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
if (status)
return status;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status)
- return status;
+ reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
+ IXGBE_MDIO_GLOBAL_ALARM_1_INT);
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
- reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
if (status)
return status;
- /* Enable override for coefficients. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ /* Enable chip-wide vendor alarm */
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
if (status)
return status;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
- reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
+
+ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+
+ return status;
+}
+
+/**
+ * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
+ * @hw: pointer to hardware structure
+ *
+ * Handle an external Base T PHY interrupt. If it is a high temperature
+ * failure alarm, return an error; if it is a link status change,
+ * set up the internal/external PHY link.
+ *
+ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
+ * failure alarm, else return PHY access status.
+ **/
+static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+{
+ struct ixgbe_phy_info *phy = &hw->phy;
+ bool lsc;
+ s32 status;
+
+ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
if (status)
return status;
- /* Toggle port SW reset by AN reset. */
+ if (lsc)
+ return phy->ops.setup_internal_link(hw);
+
+ return 0;
+}
+
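A hedged sketch of how the interrupt path is expected to consume the new handle_lasi hook; the real call site is in ixgbe_main.c, outside this excerpt, and the helper below is hypothetical:

static void example_service_ext_phy_event(struct ixgbe_hw *hw)
{
	s32 status;

	if (!hw->phy.ops.handle_lasi)
		return;

	status = hw->phy.ops.handle_lasi(hw);
	if (status == IXGBE_ERR_OVERTEMP) {
		/* High temperature failure: the PHY has already been
		 * powered down by ixgbe_get_lasi_ext_t_x550em(); the
		 * caller should escalate rather than retry.
		 */
		hw_dbg(hw, "external PHY overtemp event\n");
	}

	/* On a link status change, handle_lasi has already re-tuned the
	 * internal/external PHY link, so nothing more is needed here.
	 */
}
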
+/**
+ * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
+ * @hw: pointer to hardware structure
+ * @speed: link speed
+ *
+ * Configures the integrated KR PHY.
+ **/
+static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u32 reg_val;
+
status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status)
return status;
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC);
+ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
+ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+
+ /* Advertise 10G support. */
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+
+ /* Advertise 1G support. */
+ if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+
+ /* Restart auto-negotiation. */
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
return status;
}
@@ -990,85 +1333,82 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
**/
static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
- s32 status;
- u32 reg_val;
+ return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
+}
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status)
- return status;
+/** ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
+ * @hw: address of hardware structure
+ * @link_up: address of boolean to indicate link status
+ *
+ * Returns error code if unable to get link status.
+ **/
+static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
+{
+ s32 ret;
+ u16 autoneg_status;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
- reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
+ *link_up = false;
- /* Advertise 10G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
+ /* the status bit is latched, so read this register twice back to
+ * back; the second read reflects the current status
+ */
+ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (ret)
+ return ret;
- /* Advertise 1G support. */
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
+ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
+ if (ret)
+ return ret;
- /* Restart auto-negotiation. */
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
- return status;
+ return 0;
}
-/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY
+/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
 * @hw: pointer to hardware structure
*
- * Configures the integrated KR PHY to talk to the external PHY. The base
- * driver will call this function when it gets notification via interrupt from
- * the external PHY. This function forces the internal PHY into iXFI mode at
- * the correct speed.
+ * Configures the link between the integrated KR PHY and the external X557 PHY
+ * The driver will call this function when it gets a link status change
+ * interrupt from the X557 PHY. This function configures the link speed
+ * between the PHYs to match the link speed of the BASE-T link.
*
- * A return of a non-zero value indicates an error, and the base driver should
- * not report link up.
+ * A return of a non-zero value indicates an error, and the base driver should
+ * not report link up.
**/
-static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
+static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
{
- u32 status;
- u16 lasi, autoneg_status, speed;
ixgbe_link_speed force_speed;
+ bool link_up;
+ s32 status;
+ u16 speed;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
+ return IXGBE_ERR_CONFIG;
- /* Verify that the external link status has changed */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi);
+ /* If link is not up, then there is no setup necessary so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status)
return status;
- /* If there was no change in link status, we can just exit */
- if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM))
+ if (!link_up)
return 0;
- /* we read this twice back to back to indicate current status */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
+ &speed);
if (status)
return status;
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
+ /* If the link is no longer up, then no setup is necessary, so return */
+ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status)
return status;
- /* If link is not up return an error indicating treat link as down */
- if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
-
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &speed);
+ if (!link_up)
+ return 0;
/* clear everything but the speed and duplex bits */
speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
@@ -1088,6 +1428,22 @@ static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
return ixgbe_setup_ixfi_x550em(hw, &force_speed);
}
+/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
+ * @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_reset_phy_generic(hw);
+
+ if (status)
+ return status;
+
+ /* Configure Link Status Alarm and Temperature Threshold interrupts */
+ return ixgbe_enable_lasi_ext_t_x550em(hw);
+}
+
/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
* @hw: pointer to hardware structure
*
@@ -1098,25 +1454,32 @@ static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
+ ixgbe_link_speed speed;
s32 ret_val;
- u32 esdp;
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
- phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ hw->mac.ops.set_lan_id(hw);
- if (hw->bus.lan_id) {
- esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
- esdp |= IXGBE_ESDP_SDP1_DIR;
+ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+ phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ ixgbe_setup_mux_ctl(hw);
+
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode.
+ */
+ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If internal PHY mode is KR, then initialize KR link */
+ if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) {
+ speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
}
- esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
}
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
- /* Setup function pointers based on detected SFP module and speeds */
+ /* Setup function pointers based on detected hardware */
ixgbe_init_mac_link_ops_X550em(hw);
if (phy->sfp_type != ixgbe_sfp_type_unknown)
phy->ops.reset = NULL;
@@ -1134,11 +1497,30 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
case ixgbe_phy_x550em_ext_t:
- phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em;
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode
+ */
+ phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If internal link mode is XFI, then setup iXFI internal link,
+ * else setup KR now.
+ */
+ if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ phy->ops.setup_internal_link =
+ ixgbe_setup_internal_phy_t_x550em;
+ } else {
+ speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
+ }
+
+ phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
+ phy->ops.reset = ixgbe_reset_phy_t_X550em;
break;
default:
break;
}
+
return ret_val;
}
@@ -1177,67 +1559,37 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
**/
static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
{
- u32 status;
+ s32 status;
u16 reg;
- u32 retries = 2;
-
- do {
- /* decrement retries counter and exit if we hit 0 */
- if (retries < 1) {
- hw_dbg(hw, "External PHY not yet finished resetting.");
- return IXGBE_ERR_PHY;
- }
- retries--;
-
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_TX_VENDOR_ALARMS_3,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &reg);
- if (status)
- return status;
- /* Verify PHY FW reset has completed */
- } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1);
-
- /* Set port to low power mode */
status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- &reg);
- if (status)
- return status;
-
- /* Enable the transmitter */
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
IXGBE_MDIO_PMA_PMD_DEV_TYPE,
&reg);
if (status)
return status;
- reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE;
+ /* If the PHY FW reset completed bit is set, then this is the first
+ * SW instance after a power-on, so the PHY FW must be un-stalled.
+ */
+ if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ &reg);
+ if (status)
+ return status;
- status = hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- reg);
- if (status)
- return status;
+ reg &= ~IXGBE_MDIO_POWER_UP_STALL;
- /* Un-stall the PHY FW */
- status = hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_GLOBAL_RES_PR_10,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- &reg);
- if (status)
- return status;
-
- reg &= ~IXGBE_MDIO_POWER_UP_STALL;
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_GLOBAL_RES_PR_10,
+ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+ reg);
+ if (status)
+ return status;
+ }
- status = hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_GLOBAL_RES_PR_10,
- IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
- reg);
return status;
}
@@ -1254,6 +1606,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
s32 status;
u32 ctrl = 0;
u32 i;
+ u32 hlreg0;
bool link_up = false;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
@@ -1338,6 +1691,15 @@ mac_reset_top:
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
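+ /* Clearing MDCSPD selects the slower MDIO clock rate; presumably
+ * needed by the external copper PHY on the 10GBASE-T device.
+ */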
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ }
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ ixgbe_setup_mux_ctl(hw);
+
return status;
}
@@ -1490,6 +1852,10 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
+ .read_reg = &ixgbe_read_phy_reg_generic, \
+ .write_reg = &ixgbe_write_phy_reg_generic, \
+ .setup_link = &ixgbe_setup_phy_link_generic, \
+ .set_phy_power = &ixgbe_set_copper_phy_power, \
.check_overtemp = &ixgbe_tn_check_overtemp, \
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
@@ -1497,9 +1863,6 @@ static struct ixgbe_phy_operations phy_ops_X550 = {
X550_COMMON_PHY
.init = NULL,
.identify = &ixgbe_identify_phy_generic,
- .read_reg = &ixgbe_read_phy_reg_generic,
- .write_reg = &ixgbe_write_phy_reg_generic,
- .setup_link = &ixgbe_setup_phy_link_generic,
.read_i2c_combined = &ixgbe_read_i2c_combined_generic,
.write_i2c_combined = &ixgbe_write_i2c_combined_generic,
};
@@ -1508,9 +1871,14 @@ static struct ixgbe_phy_operations phy_ops_X550EM_x = {
X550_COMMON_PHY
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
- .read_reg = NULL, /* defined later */
- .write_reg = NULL, /* defined later */
- .setup_link = NULL, /* defined later */
+};
+
+static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X550)
+};
+
+static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X550EM_x)
};
struct ixgbe_info ixgbe_X550_info = {
@@ -1520,6 +1888,7 @@ struct ixgbe_info ixgbe_X550_info = {
.eeprom_ops = &eeprom_ops_X550,
.phy_ops = &phy_ops_X550,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_X550,
};
struct ixgbe_info ixgbe_X550EM_x_info = {
@@ -1529,4 +1898,5 @@ struct ixgbe_info ixgbe_X550EM_x_info = {
.eeprom_ops = &eeprom_ops_X550EM_x,
.phy_ops = &phy_ops_X550EM_x,
.mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_X550EM_x,
};
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1c75829eb166..d52639bc491f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -3125,9 +3125,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mib_counters_clear(mp);
- init_timer(&mp->mib_counters_timer);
- mp->mib_counters_timer.data = (unsigned long)mp;
- mp->mib_counters_timer.function = mib_counters_timer_wrapper;
+ setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
+ (unsigned long)mp);
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
spin_lock_init(&mp->mib_counters_lock);
@@ -3136,9 +3135,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
- init_timer(&mp->rx_oom);
- mp->rx_oom.data = (unsigned long)mp;
- mp->rx_oom.function = oom_timer_wrapper;
+ setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ce5f7f9cff06..ecce8261ce3b 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1359,7 +1359,7 @@ static void *mvneta_frag_alloc(const struct mvneta_port *pp)
static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
{
if (likely(pp->frag_size <= PAGE_SIZE))
- put_page(virt_to_head_page(data));
+ skb_free_frag(data);
else
kfree(data);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 529ef0594b90..68ae765873a9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -882,7 +882,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
{
struct ib_smp *smp = inbox->buf;
u32 index;
- u8 port;
+ u8 port, slave_port;
u8 opcode_modifier;
u16 *table;
int err;
@@ -894,7 +894,8 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
__be32 slave_cap_mask;
__be64 slave_node_guid;
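+ /* vhcr->in_modifier carries the port number as the slave sees it;
+ * translate it to the physical port before issuing FW commands.
+ */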
- port = vhcr->in_modifier;
+ slave_port = vhcr->in_modifier;
+ port = mlx4_slave_convert_port(dev, slave, slave_port);
/* network-view bit is for driver use only, and should not be passed to FW */
opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
@@ -930,8 +931,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
/*get the slave specific caps:*/
/*do the command */
+ smp->attr_mod = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, opcode_modifier,
+ port, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
/* modify the response for slaves */
if (!err && slave != mlx4_master_func_num(dev)) {
@@ -975,7 +977,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
}
if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, opcode_modifier,
+ port, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (!err) {
slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
@@ -2915,7 +2917,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->mac = mac;
- mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
+ mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
vf, port, s_info->mac);
return 0;
}
@@ -3197,6 +3199,12 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
int enabled)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+ &priv->dev, slave);
+ int min_port = find_first_bit(actv_ports.ports,
+ priv->dev.caps.num_ports) + 1;
+ int max_port = min_port - 1 +
+ bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
if (slave == mlx4_master_func_num(dev))
return 0;
@@ -3206,6 +3214,11 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
enabled < 0 || enabled > 1)
return -EINVAL;
+ if (min_port == max_port && dev->caps.num_ports > 1) {
+ mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
+ return -EPROTONOSUPPORT;
+ }
+
priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index e71f31387ac6..3348e646db70 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -292,7 +292,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
u64 mtt_addr;
int err;
- if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
+ if (vector >= dev->caps.num_comp_vectors)
return -EINVAL;
cq->vector = vector;
@@ -319,7 +319,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq_context->flags |= cpu_to_be32(1 << 19);
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
- cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
+ cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -339,11 +339,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
init_completion(&cq->free);
cq->comp = mlx4_add_cq_to_tasklet;
cq->tasklet_ctx.priv =
- &priv->eq_table.eq[cq->vector].tasklet_ctx;
+ &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
- cq->irq = priv->eq_table.eq[cq->vector].irq;
+ cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
return 0;
err_radix:
@@ -368,7 +368,10 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
- synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+ synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
+ if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
+ synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 22da4d0d0f05..63769df872a4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,6 +66,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->ring = ring;
cq->is_tx = mode;
+ cq->vector = mdev->dev->caps.num_comp_vectors;
/* Allocate HW buffers on provided NUMA node.
* dev->numa_node is used in mtt range allocation flow.
@@ -101,12 +102,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int err = 0;
char name[25];
int timestamp_en = 0;
- struct cpu_rmap *rmap =
-#ifdef CONFIG_RFS_ACCEL
- priv->dev->rx_cpu_rmap;
-#else
- NULL;
-#endif
+ bool assigned_eq = false;
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -116,23 +112,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
memset(cq->buf, 0, cq->buf_size);
if (cq->is_tx == RX) {
- if (mdev->dev->caps.comp_pool) {
- if (!cq->vector) {
- sprintf(name, "%s-%d", priv->dev->name,
- cq->ring);
- /* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(mdev->dev, name, rmap,
- &cq->vector)) {
- cq->vector = (cq->ring + 1 + priv->port)
- % mdev->dev->caps.num_comp_vectors;
- mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
- name);
- }
-
+ if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
+ cq->vector)) {
+ cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
+
+ err = mlx4_assign_eq(mdev->dev, priv->port,
+ &cq->vector);
+ if (err) {
+ mlx4_err(mdev, "Failed assigning an EQ to %s\n",
+ name);
+ goto free_eq;
}
- } else {
- cq->vector = (cq->ring + 1 + priv->port) %
- mdev->dev->caps.num_comp_vectors;
+
+ assigned_eq = true;
}
cq->irq_desc =
@@ -159,7 +151,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
cq->vector, 0, timestamp_en);
if (err)
- return err;
+ goto free_eq;
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
@@ -168,13 +160,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
} else {
- struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
-
- err = irq_set_affinity_hint(cq->mcq.irq,
- ring->affinity_mask);
- if (err)
- mlx4_warn(mdev, "Failed setting affinity hint\n");
-
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
}
@@ -182,6 +167,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
napi_enable(&cq->napi);
return 0;
+
+free_eq:
+ if (assigned_eq)
+ mlx4_release_eq(mdev->dev, cq->vector);
+ cq->vector = mdev->dev->caps.num_comp_vectors;
+ return err;
}
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
@@ -191,9 +182,9 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+ if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
+ cq->is_tx == RX)
mlx4_release_eq(priv->mdev->dev, cq->vector);
- }
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
@@ -207,7 +198,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (!cq->is_tx) {
napi_hash_del(&cq->napi);
synchronize_rcu();
- irq_set_affinity_hint(cq->mcq.irq, NULL);
}
netif_napi_del(&cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index cf467a9f6cc7..98efb5842fca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1954,7 +1954,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
int i;
#ifdef CONFIG_RFS_ACCEL
- free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
priv->dev->rx_cpu_rmap = NULL;
#endif
@@ -2012,11 +2011,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
}
#ifdef CONFIG_RFS_ACCEL
- if (priv->mdev->dev->caps.comp_pool) {
- priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
- if (!priv->dev->rx_cpu_rmap)
- goto err;
- }
+ priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 2a77a6b19121..35f726c17e48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -337,15 +337,10 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
struct mlx4_dev *dev = mdev->dev;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- if (!dev->caps.comp_pool)
- num_of_eqs = max_t(int, MIN_RX_RINGS,
- min_t(int,
- dev->caps.num_comp_vectors,
- DEF_RX_RINGS));
- else
- num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
- dev->caps.comp_pool/
- dev->caps.num_ports) - 1;
+ num_of_eqs = max_t(int, MIN_RX_RINGS,
+ min_t(int,
+ mlx4_get_eqs_per_port(mdev->dev, i),
+ DEF_RX_RINGS));
num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
min_t(int, num_of_eqs,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 2619c9fbf42d..aae13adfb492 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -145,7 +145,7 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
struct mlx4_eqe *eqe;
u8 slave;
- int i;
+ int i, phys_port, slave_port;
for (eqe = next_slave_event_eqe(slave_eq); eqe;
eqe = next_slave_event_eqe(slave_eq)) {
@@ -154,9 +154,20 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
/* All active slaves need to receive the event */
if (slave == ALL_SLAVES) {
for (i = 0; i <= dev->persist->num_vfs; i++) {
+ phys_port = 0;
+ if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
+ eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
+ phys_port = eqe->event.port_mgmt_change.port;
+ slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
+ if (slave_port < 0) /* VF doesn't have this port */
+ continue;
+ eqe->event.port_mgmt_change.port = slave_port;
+ }
if (mlx4_GEN_EQE(dev, i, eqe))
mlx4_warn(dev, "Failed to generate event for slave %d\n",
i);
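+ /* the eqe is shared across the slave loop; restore the
+ * physical port number before handling the next slave
+ */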
+ if (phys_port)
+ eqe->event.port_mgmt_change.port = phys_port;
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -210,6 +221,22 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
slave_event(dev, slave, eqe);
}
+#if defined(CONFIG_SMP)
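+/* Pass the EQ's preferred CPU mask to the IRQ core. The mask is only
+ * a hint (consumed by irqbalance and the like), so a failure is
+ * logged and otherwise ignored.
+ */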
+static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
+{
+ int hint_err;
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_eq *eq = &priv->eq_table.eq[vec];
+
+ if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+ return;
+
+ hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+ if (hint_err)
+ mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+}
+#endif
+
int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
struct mlx4_eqe eqe;
@@ -224,7 +251,7 @@ int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
- eqe.event.port_mgmt_change.port = port;
+ eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
return mlx4_GEN_EQE(dev, slave, &eqe);
}
@@ -241,7 +268,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
- eqe.event.port_mgmt_change.port = port;
+ eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
return mlx4_GEN_EQE(dev, slave, &eqe);
}
@@ -251,6 +278,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
u8 port_subtype_change)
{
struct mlx4_eqe eqe;
+ u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);
/* don't send if we don't have that slave */
if (dev->persist->num_vfs < slave)
@@ -259,7 +287,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
eqe.subtype = port_subtype_change;
- eqe.event.port_change.port = cpu_to_be32(port << 28);
+ eqe.event.port_change.port = cpu_to_be32(slave_port << 28);
mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
port_subtype_change, slave, port);
@@ -589,6 +617,10 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
if (i == mlx4_master_func_num(dev))
continue;
+ eqe->event.port_change.port =
+ cpu_to_be32(
+ (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+ | (mlx4_phys_to_slave_port(dev, i, port) << 28));
mlx4_slave_event(dev, i, eqe);
}
}
@@ -879,8 +911,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
* we need to map, take the difference of highest index and
* the lowest index we'll use and add 1.
*/
- return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
- dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
+ return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
+ dev->caps.reserved_eqs / 4 + 1;
}
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -1069,32 +1101,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
static void mlx4_free_irqs(struct mlx4_dev *dev)
{
struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
- struct mlx4_priv *priv = mlx4_priv(dev);
- int i, vec;
+ int i;
if (eq_table->have_irq)
free_irq(dev->persist->pdev->irq, dev);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
+ free_cpumask_var(eq_table->eq[i].affinity_mask);
+#if defined(CONFIG_SMP)
+ irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
+#endif
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
- for (i = 0; i < dev->caps.comp_pool; i++) {
- /*
- * Freeing the assigned irq's
- * all bits should be 0, but we need to validate
- */
- if (priv->msix_ctl.pool_bm & 1ULL << i) {
- /* NO need protecting*/
- vec = dev->caps.num_comp_vectors + 1 + i;
- free_irq(priv->eq_table.eq[vec].irq,
- &priv->eq_table.eq[vec]);
- }
- }
-
-
kfree(eq_table->irq_names);
}
@@ -1175,76 +1196,73 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
}
priv->eq_table.irq_names =
- kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
- dev->caps.comp_pool),
+ kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
GFP_KERNEL);
if (!priv->eq_table.irq_names) {
err = -ENOMEM;
- goto err_out_bitmap;
+ goto err_out_clr_int;
}
- for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
- err = mlx4_create_eq(dev, dev->caps.num_cqs -
- dev->caps.reserved_cqs +
- MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
- &priv->eq_table.eq[i]);
- if (err) {
- --i;
- goto err_out_unmap;
- }
- }
-
- err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
- &priv->eq_table.eq[dev->caps.num_comp_vectors]);
- if (err)
- goto err_out_comp;
-
- /*if additional completion vectors poolsize is 0 this loop will not run*/
- for (i = dev->caps.num_comp_vectors + 1;
- i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+ if (i == MLX4_EQ_ASYNC) {
+ err = mlx4_create_eq(dev,
+ MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+ 0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+ } else {
+ struct mlx4_eq *eq = &priv->eq_table.eq[i];
+#ifdef CONFIG_RFS_ACCEL
+ int port = find_first_bit(eq->actv_ports.ports,
+ dev->caps.num_ports) + 1;
+
+ if (port <= dev->caps.num_ports) {
+ struct mlx4_port_info *info =
+ &mlx4_priv(dev)->port[port];
+
+ if (!info->rmap) {
+ info->rmap = alloc_irq_cpu_rmap(
+ mlx4_get_eqs_per_port(dev, port));
+ if (!info->rmap) {
+ mlx4_warn(dev, "Failed to allocate cpu rmap\n");
+ err = -ENOMEM;
+ goto err_out_unmap;
+ }
+ }
- err = mlx4_create_eq(dev, dev->caps.num_cqs -
- dev->caps.reserved_cqs +
- MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
- &priv->eq_table.eq[i]);
- if (err) {
- --i;
- goto err_out_unmap;
+ err = irq_cpu_rmap_add(
+ info->rmap, eq->irq);
+ if (err)
+ mlx4_warn(dev, "Failed adding irq rmap\n");
+ }
+#endif
+ err = mlx4_create_eq(dev, dev->caps.num_cqs -
+ dev->caps.reserved_cqs +
+ MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ?
+ i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
+ eq);
}
+ if (err)
+ goto err_out_unmap;
}
-
if (dev->flags & MLX4_FLAG_MSI_X) {
const char *eq_name;
- for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
- if (i < dev->caps.num_comp_vectors) {
- snprintf(priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE,
- "mlx4-comp-%d@pci:%s", i,
- pci_name(dev->persist->pdev));
- } else {
- snprintf(priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE,
- "mlx4-async@pci:%s",
- pci_name(dev->persist->pdev));
- }
+ snprintf(priv->eq_table.irq_names +
+ MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE,
+ "mlx4-async@pci:%s",
+ pci_name(dev->persist->pdev));
+ eq_name = priv->eq_table.irq_names +
+ MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;
- eq_name = priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE;
- err = request_irq(priv->eq_table.eq[i].irq,
- mlx4_msi_x_interrupt, 0, eq_name,
- priv->eq_table.eq + i);
- if (err)
- goto err_out_async;
+ err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
+ mlx4_msi_x_interrupt, 0, eq_name,
+ priv->eq_table.eq + MLX4_EQ_ASYNC);
+ if (err)
+ goto err_out_unmap;
- priv->eq_table.eq[i].have_irq = 1;
- }
+ priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
} else {
snprintf(priv->eq_table.irq_names,
MLX4_IRQNAME_SIZE,
@@ -1253,36 +1271,38 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
IRQF_SHARED, priv->eq_table.irq_names, dev);
if (err)
- goto err_out_async;
+ goto err_out_unmap;
priv->eq_table.have_irq = 1;
}
err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
if (err)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
- for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
- eq_set_ci(&priv->eq_table.eq[i], 1);
+ /* arm ASYNC eq */
+ eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);
return 0;
-err_out_async:
- mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-
-err_out_comp:
- i = dev->caps.num_comp_vectors - 1;
-
err_out_unmap:
- while (i >= 0) {
- mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- --i;
+ while (i >= 0)
+ mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+#ifdef CONFIG_RFS_ACCEL
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ if (mlx4_priv(dev)->port[i].rmap) {
+ free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+ mlx4_priv(dev)->port[i].rmap = NULL;
+ }
}
+#endif
+ mlx4_free_irqs(dev);
+
+err_out_clr_int:
if (!mlx4_is_slave(dev))
mlx4_unmap_clr_int(dev);
- mlx4_free_irqs(dev);
err_out_bitmap:
mlx4_unmap_uar(dev);
@@ -1300,11 +1320,19 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
int i;
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+#ifdef CONFIG_RFS_ACCEL
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ if (mlx4_priv(dev)->port[i].rmap) {
+ free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+ mlx4_priv(dev)->port[i].rmap = NULL;
+ }
+ }
+#endif
mlx4_free_irqs(dev);
- for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
if (!mlx4_is_slave(dev))
@@ -1355,87 +1383,169 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
/* Return to default */
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
- int *vector)
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+ if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
+ (vector == MLX4_EQ_ASYNC))
+ return false;
+
+ return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
+}
+EXPORT_SYMBOL(mlx4_is_eq_vector_valid);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
+{
struct mlx4_priv *priv = mlx4_priv(dev);
- int vec = 0, err = 0, i;
+ unsigned int i;
+ unsigned int sum = 0;
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
+ sum += !!test_bit(port - 1,
+ priv->eq_table.eq[i].actv_ports.ports);
+
+ return sum;
+}
+EXPORT_SYMBOL(mlx4_get_eqs_per_port);
+
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+ if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
+ return -EINVAL;
+
+ return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
+ dev->caps.num_ports) > 1);
+}
+EXPORT_SYMBOL(mlx4_is_eq_shared);
+
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
+{
+ return mlx4_priv(dev)->port[port].rmap;
+}
+EXPORT_SYMBOL(mlx4_get_cpu_rmap);
+
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err = 0, i = 0;
+ u32 min_ref_count_val = (u32)-1;
+ int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
+ int *prequested_vector = NULL;
+
mutex_lock(&priv->msix_ctl.pool_lock);
- for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
- if (~priv->msix_ctl.pool_bm & 1ULL << i) {
- priv->msix_ctl.pool_bm |= 1ULL << i;
- vec = dev->caps.num_comp_vectors + 1 + i;
- snprintf(priv->eq_table.irq_names +
- vec * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE, "%s", name);
-#ifdef CONFIG_RFS_ACCEL
- if (rmap) {
- err = irq_cpu_rmap_add(rmap,
- priv->eq_table.eq[vec].irq);
- if (err)
- mlx4_warn(dev, "Failed adding irq rmap\n");
+ if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
+ (requested_vector >= 0) &&
+ (requested_vector != MLX4_EQ_ASYNC)) {
+ if (test_bit(port - 1,
+ priv->eq_table.eq[requested_vector].actv_ports.ports)) {
+ prequested_vector = &requested_vector;
+ } else {
+ struct mlx4_eq *eq;
+
+ for (i = 1; i < port;
+ requested_vector += mlx4_get_eqs_per_port(dev, i++))
+ ;
+
+ eq = &priv->eq_table.eq[requested_vector];
+ if (requested_vector < dev->caps.num_comp_vectors + 1 &&
+ test_bit(port - 1, eq->actv_ports.ports)) {
+ prequested_vector = &requested_vector;
}
-#endif
- err = request_irq(priv->eq_table.eq[vec].irq,
- mlx4_msi_x_interrupt, 0,
- &priv->eq_table.irq_names[vec<<5],
- priv->eq_table.eq + vec);
- if (err) {
- /*zero out bit by fliping it*/
- priv->msix_ctl.pool_bm ^= 1 << i;
- vec = 0;
- continue;
- /*we dont want to break here*/
+ }
+ }
+
+ if (!prequested_vector) {
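+ /* no usable vector was requested; fall back to the least-
+ * referenced EQ among those serving this port
+ */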
+ requested_vector = -1;
+ for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
+ i++) {
+ struct mlx4_eq *eq = &priv->eq_table.eq[i];
+
+ if (min_ref_count_val > eq->ref_count &&
+ test_bit(port - 1, eq->actv_ports.ports)) {
+ min_ref_count_val = eq->ref_count;
+ requested_vector = i;
}
+ }
+
+ if (requested_vector < 0) {
+ err = -ENOSPC;
+ goto err_unlock;
+ }
+
+ prequested_vector = &requested_vector;
+ }
+
+ if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
+ dev->flags & MLX4_FLAG_MSI_X) {
+ set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+ snprintf(priv->eq_table.irq_names +
+ *prequested_vector * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
+ *prequested_vector, dev_name(&dev->persist->pdev->dev));
- eq_set_ci(&priv->eq_table.eq[vec], 1);
+ err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
+ mlx4_msi_x_interrupt, 0,
+ &priv->eq_table.irq_names[*prequested_vector << 5],
+ priv->eq_table.eq + *prequested_vector);
+
+ if (err) {
+ clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+ *prequested_vector = -1;
+ } else {
+#if defined(CONFIG_SMP)
+ mlx4_set_eq_affinity_hint(priv, *prequested_vector);
+#endif
+ eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
+ priv->eq_table.eq[*prequested_vector].have_irq = 1;
}
}
+
+ if (!err && *prequested_vector >= 0)
+ priv->eq_table.eq[*prequested_vector].ref_count++;
+
+err_unlock:
mutex_unlock(&priv->msix_ctl.pool_lock);
- if (vec) {
- *vector = vec;
- } else {
+ if (!err && *prequested_vector >= 0)
+ *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
+ else
*vector = 0;
- err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
- }
+
return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);
-int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- return priv->eq_table.eq[vec].irq;
+ return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- /*bm index*/
- int i = vec - dev->caps.num_comp_vectors - 1;
-
- if (likely(i >= 0)) {
- /*sanity check , making sure were not trying to free irq's
- Belonging to a legacy EQ*/
- mutex_lock(&priv->msix_ctl.pool_lock);
- if (priv->msix_ctl.pool_bm & 1ULL << i) {
- free_irq(priv->eq_table.eq[vec].irq,
- &priv->eq_table.eq[vec]);
- priv->msix_ctl.pool_bm &= ~(1ULL << i);
- }
- mutex_unlock(&priv->msix_ctl.pool_lock);
- }
+ int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);
+ mutex_lock(&priv->msix_ctl.pool_lock);
+ priv->eq_table.eq[eq_vec].ref_count--;
+
+ /* once we have allocated an EQ, we don't release it because it
+ * might be bound to a cpu_rmap.
+ */
+ mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ced5ecab5aa7..7d57777e65c5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2364,11 +2364,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (err) {
if (dev->flags & MLX4_FLAG_MSI_X) {
mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_warn(dev, "Trying again without MSI-X\n");
} else {
mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
}
@@ -2481,14 +2481,45 @@ err_uar_table_free:
return err;
}
+static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
+{
+ int requested_cpu = 0;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_eq *eq;
+ int off = 0;
+ int i;
+
+ if (eqn > dev->caps.num_comp_vectors)
+ return -EINVAL;
+
+ for (i = 1; i < port; i++)
+ off += mlx4_get_eqs_per_port(dev, i);
+
+ requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
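+ /* e.g. on a dual-port device with 4 EQs per port, EQ 6 of
+ * port 2 is hinted to CPU 6 - 4 - 1 = 1
+ */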
+
+ /* Meaning EQs are shared, and this call comes from the second port */
+ if (requested_cpu < 0)
+ return 0;
+
+ eq = &priv->eq_table.eq[eqn];
+
+ if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_set_cpu(requested_cpu, eq->affinity_mask);
+
+ return 0;
+}
+
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int i;
+ int port = 0;
if (msi_x) {
- int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+ int nreq = dev->caps.num_ports * num_online_cpus() + 1;
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
nreq);
@@ -2503,20 +2534,55 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
nreq);
- if (nreq < 0) {
+ if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
kfree(entries);
goto no_msi;
- } else if (nreq < MSIX_LEGACY_SZ +
- dev->caps.num_ports * MIN_MSIX_P_PORT) {
- /*Working in legacy mode , all EQ's shared*/
- dev->caps.comp_pool = 0;
- dev->caps.num_comp_vectors = nreq - 1;
- } else {
- dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
- dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
}
- for (i = 0; i < nreq; ++i)
- priv->eq_table.eq[i].irq = entries[i].vector;
+ /* 1 is reserved for events (asynchronous EQ) */
+ dev->caps.num_comp_vectors = nreq - 1;
+
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
+ bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
+ dev->caps.num_ports);
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
+ if (i == MLX4_EQ_ASYNC)
+ continue;
+
+ priv->eq_table.eq[i].irq =
+ entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
+
+ if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+ bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+ dev->caps.num_ports);
+ /* We don't set affinity hint when there
+ * aren't enough EQs
+ */
+ } else {
+ set_bit(port,
+ priv->eq_table.eq[i].actv_ports.ports);
+ if (mlx4_init_affinity_hint(dev, port + 1, i))
+ mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
+ i);
+ }
+ /* We divide the EQs evenly between the two ports.
+ * (dev->caps.num_comp_vectors / dev->caps.num_ports)
+ * refers to the number of EQs per port
+ * (i.e. eqs_per_port). Theoretically, we would like to
+ * write something like (i + 1) % eqs_per_port == 0.
+ * However, since there's an asynchronous EQ, we have
+ * to skip over it by comparing this condition to
+ * !!((i + 1) > MLX4_EQ_ASYNC).
+ */
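+ /* For example, with 8 completion vectors on a dual-port HCA,
+ * EQs 1-4 end up serving port 1 and EQs 5-8 serving port 2
+ * (EQ 0 being the asynchronous EQ).
+ */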
+ if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
+ ((i + 1) %
+ (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
+ !!((i + 1) > MLX4_EQ_ASYNC))
+ /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
+ * everything is shared anyway.
+ */
+ port++;
+ }
dev->flags |= MLX4_FLAG_MSI_X;
@@ -2526,10 +2592,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
no_msi:
dev->caps.num_comp_vectors = 1;
- dev->caps.comp_pool = 0;
- for (i = 0; i < 2; ++i)
+ BUG_ON(MLX4_EQ_ASYNC >= 2);
+ for (i = 0; i < 2; ++i) {
priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
+ if (i != MLX4_EQ_ASYNC) {
+ bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+ dev->caps.num_ports);
+ }
+ }
}
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2594,6 +2665,10 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(info->rmap);
+ info->rmap = NULL;
+#endif
}
static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -2749,6 +2824,7 @@ disable_sriov:
free_mem:
dev->persist->num_vfs = 0;
kfree(dev->dev_vfs);
+ dev->dev_vfs = NULL;
return dev_flags & ~MLX4_FLAG_MASTER;
}
@@ -2900,6 +2976,7 @@ slave_start:
existing_vfs,
reset_flow);
+ mlx4_close_fw(dev);
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
dev->flags = dev_flags;
if (!SRIOV_VALID_STATE(dev->flags)) {
@@ -2988,18 +3065,6 @@ slave_start:
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
- int ib_ports = 0;
-
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
- ib_ports++;
-
- if (ib_ports &&
- (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
- mlx4_err(dev,
- "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
- err = -EINVAL;
- goto err_close;
- }
if (dev->caps.num_ports < 2 &&
num_vfs_argc > 1) {
err = -EINVAL;
@@ -3036,7 +3101,7 @@ slave_start:
if (err)
goto err_master_mfunc;
- priv->msix_ctl.pool_bm = 0;
+ bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
mutex_init(&priv->msix_ctl.pool_lock);
mlx4_enable_msi_x(dev);
@@ -3058,7 +3123,6 @@ slave_start:
!mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
dev->caps.num_comp_vectors = 1;
- dev->caps.comp_pool = 0;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 502d3dd2c888..f424900d23a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -287,6 +287,12 @@ struct mlx4_icm_table {
#define MLX4_CQE_SIZE_MASK_STRIDE 0x3
#define MLX4_EQE_SIZE_MASK_STRIDE 0x30
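+/* EQ 0 is reserved for the asynchronous EQ, so the completion vectors
+ * exposed to CQ users are shifted by one relative to EQ indices:
+ * e.g. CQ vector 0 maps to EQ 1, and EQ 1 maps back to CQ vector 0.
+ */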
+#define MLX4_EQ_ASYNC 0
+#define MLX4_EQ_TO_CQ_VECTOR(vector) ((vector) - \
+ !!((int)(vector) >= MLX4_EQ_ASYNC))
+#define MLX4_CQ_TO_EQ_VECTOR(vector) ((vector) + \
+ !!((int)(vector) >= MLX4_EQ_ASYNC))
+
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
@@ -391,6 +397,9 @@ struct mlx4_eq {
struct mlx4_buf_list *page_list;
struct mlx4_mtt mtt;
struct mlx4_eq_tasklet tasklet_ctx;
+ struct mlx4_active_ports actv_ports;
+ u32 ref_count;
+ cpumask_var_t affinity_mask;
};
struct mlx4_slave_eqe {
@@ -808,6 +817,7 @@ struct mlx4_port_info {
struct mlx4_vlan_table vlan_table;
struct mlx4_roce_gid_table gid_table;
int base_qpn;
+ struct cpu_rmap *rmap;
};
struct mlx4_sense {
@@ -818,7 +828,7 @@ struct mlx4_sense {
};
struct mlx4_msix_ctl {
- u64 pool_bm;
+ DECLARE_BITMAP(pool_bm, MAX_MSIX);
struct mutex pool_lock;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d021f079f181..edd8fd69ec9a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -338,7 +338,7 @@ struct mlx4_en_cq {
struct napi_struct napi;
int size;
int buf_size;
- unsigned vector;
+ int vector;
enum cq_type is_tx;
u16 moder_time;
u16 moder_cnt;
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 2bf437aafc53..bae8b22edbb7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -82,7 +82,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
u64 total_size = 0;
struct mlx4_resource *profile;
- struct mlx4_resource tmp;
struct sysinfo si;
int i, j;
@@ -149,11 +148,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
*/
for (i = MLX4_RES_NUM; i > 0; --i)
for (j = 1; j < i; ++j) {
- if (profile[j].size > profile[j - 1].size) {
- tmp = profile[j];
- profile[j] = profile[j - 1];
- profile[j - 1] = tmp;
- }
+ if (profile[j].size > profile[j - 1].size)
+ swap(profile[j], profile[j - 1]);
}
for (i = 0; i < MLX4_RES_NUM; ++i) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index b75214a80d0e..20268634a9ab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -749,7 +749,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
{
int sort[MLX4_NUM_QP_REGION];
- int i, j, tmp;
+ int i, j;
int last_base = dev->caps.num_qps;
for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
@@ -758,11 +758,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
if (dev->caps.reserved_qps_cnt[sort[j]] >
- dev->caps.reserved_qps_cnt[sort[j - 1]]) {
- tmp = sort[j];
- sort[j] = sort[j - 1];
- sort[j - 1] = tmp;
- }
+ dev->caps.reserved_qps_cnt[sort[j - 1]])
+ swap(sort[j], sort[j - 1]);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index bafe2180cf0c..ab48386bfefc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2703,6 +2703,10 @@ static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
context->qkey = cpu_to_be32(qkey);
}
+static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
+ struct mlx4_qp_context *qpc,
+ struct mlx4_cmd_mailbox *inbox);
+
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -2725,6 +2729,10 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct res_srq *srq;
int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+ err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
+ if (err)
+ return err;
+
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
if (err)
return err;
@@ -3526,8 +3534,8 @@ static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
((port & 1) << 6);
- if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
- mlx4_is_eth(dev, port + 1)) {
+ if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
+ qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
qpc->pri_path.sched_queue = pri_sched_queue;
}
@@ -3965,6 +3973,22 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
return 0;
}
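+/* If a rule consists of nothing but a single multicast or broadcast
+ * L2 match, place it in the NIC-domain priority.
+ */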
+static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+ struct _rule_hw *eth_header)
+{
+ if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+ is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+ struct mlx4_net_trans_rule_hw_eth *eth =
+ (struct mlx4_net_trans_rule_hw_eth *)eth_header;
+ struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+ bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+ next_rule->rsvd == 0;
+
+ if (last_rule)
+ ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+ }
+}
+
/*
* In case of missing eth header, append eth header with a MAC address
* assigned to the VF.
@@ -4117,6 +4141,12 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
rule_header = (struct _rule_hw *)(ctrl + 1);
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
+ if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
+ handle_eth_header_mcast_prio(ctrl, rule_header);
+
+ if (slave == dev->caps.function)
+ goto execute;
+
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
if (validate_eth_header_mac(slave, rule_header, rlist)) {
@@ -4143,6 +4173,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
goto err_put;
}
+execute:
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
vhcr->in_modifier, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 8ff57e8e3e91..158c88c69ef9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -3,6 +3,18 @@
#
config MLX5_CORE
- tristate
+ tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
depends on PCI
default n
+ ---help---
+ Core driver for low level functionality of the ConnectX-4 and
+ Connect-IB cards by Mellanox Technologies.
+
+config MLX5_CORE_EN
+ bool "Mellanox Technologies ConnectX-4 Ethernet support"
+ depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ default n
+ ---help---
+ Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+ Ethernet and Infiniband support in ConnectX-4 are currently mutually
+ exclusive.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 105780bb980b..26a68b8af2c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -2,4 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o
+ mad.o transobj.o vport.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \
+ en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
+ en_txrx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index ac0f7bf4be95..0715b497511f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -42,95 +42,36 @@
#include "mlx5_core.h"
/* Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0. If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
+ * register it in a memory region at HCA virtual address 0.
*/
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
- struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
{
dma_addr_t t;
buf->size = size;
- if (size <= max_direct) {
- buf->nbufs = 1;
- buf->npages = 1;
- buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
- if (!buf->direct.buf)
- return -ENOMEM;
-
- buf->direct.map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
- } else {
- int i;
-
- buf->direct.buf = NULL;
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- buf->npages = buf->nbufs;
- buf->page_shift = PAGE_SHIFT;
- buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- GFP_KERNEL);
- if (!buf->page_list)
- return -ENOMEM;
-
- for (i = 0; i < buf->nbufs; i++) {
- buf->page_list[i].buf =
- dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
- &t, GFP_KERNEL);
- if (!buf->page_list[i].buf)
- goto err_free;
-
- buf->page_list[i].map = t;
- }
-
- if (BITS_PER_LONG == 64) {
- struct page **pages;
- pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
- if (!pages)
- goto err_free;
- for (i = 0; i < buf->nbufs; i++)
- pages[i] = virt_to_page(buf->page_list[i].buf);
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- goto err_free;
- }
- }
+ buf->npages = 1;
+ buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
+ buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
+ size, &t, GFP_KERNEL);
+ if (!buf->direct.buf)
+ return -ENOMEM;
- return 0;
+ buf->direct.map = t;
-err_free:
- mlx5_buf_free(dev, buf);
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
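+ /* e.g. a 16KB buffer whose bus address is only 4KB-aligned ends
+ * up registered as four 4KB pages rather than one 16KB page
+ */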
- return -ENOMEM;
+ return 0;
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
- int i;
-
- if (buf->nbufs == 1)
- dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
- buf->direct.map);
- else {
- if (BITS_PER_LONG == 64)
- vunmap(buf->direct.buf);
-
- for (i = 0; i < buf->nbufs; i++)
- if (buf->page_list[i].buf)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
- buf->page_list[i].buf,
- buf->page_list[i].map);
- kfree(buf->page_list);
- }
+ dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
+ buf->direct.map);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
@@ -230,10 +171,7 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
int i;
for (i = 0; i < buf->npages; i++) {
- if (buf->nbufs == 1)
- addr = buf->direct.map + (i << buf->page_shift);
- else
- addr = buf->page_list[i].map;
+ addr = buf->direct.map + (i << buf->page_shift);
pas[i] = cpu_to_be64(addr);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e3273faf4568..75ff58dc1ff5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -75,25 +75,6 @@ enum {
MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};
-enum {
- MLX5_CMD_STAT_OK = 0x0,
- MLX5_CMD_STAT_INT_ERR = 0x1,
- MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
- MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
- MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
- MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
- MLX5_CMD_STAT_RES_BUSY = 0x6,
- MLX5_CMD_STAT_LIM_ERR = 0x8,
- MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
- MLX5_CMD_STAT_IX_ERR = 0xa,
- MLX5_CMD_STAT_NO_RES_ERR = 0xf,
- MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
- MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
- MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
- MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
- MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
-};
-
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out,
@@ -390,8 +371,17 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ARM_RQ:
return "ARM_RQ";
- case MLX5_CMD_OP_RESIZE_SRQ:
- return "RESIZE_SRQ";
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return "CREATE_XRC_SRQ";
+
+ case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ return "DESTROY_XRC_SRQ";
+
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ return "QUERY_XRC_SRQ";
+
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ return "ARM_XRC_SRQ";
case MLX5_CMD_OP_ALLOC_PD:
return "ALLOC_PD";
@@ -408,8 +398,8 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ATTACH_TO_MCG:
return "ATTACH_TO_MCG";
- case MLX5_CMD_OP_DETACH_FROM_MCG:
- return "DETACH_FROM_MCG";
+ case MLX5_CMD_OP_DETTACH_FROM_MCG:
+ return "DETTACH_FROM_MCG";
case MLX5_CMD_OP_ALLOC_XRCD:
return "ALLOC_XRCD";
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index eb0cf81f5f45..04ab7e445eae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -219,6 +219,24 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq,
+ u16 cq_period,
+ u16 cq_max_count)
+{
+ struct mlx5_modify_cq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.cqn = cpu_to_be32(cq->cqn);
+ in.ctx.cq_period = cpu_to_be16(cq_period);
+ in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
+ in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
+ MLX5_CQ_MODIFY_COUNT);
+
+ return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+}
+
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
new file mode 100644
index 000000000000..e14120eccf04
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -0,0 +1,523 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/vport.h>
+#include "wq.h"
+#include "transobj.h"
+#include "mlx5_core.h"
+
+#define MLX5E_MAX_NUM_TC 8
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
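+/* SQ/RQ sizes above are log2 of the number of ring entries:
+ * 0x7 = 128 (minimum), 0xa = 1024 (default), 0xd = 8192 (maximum)
+ */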
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
+#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
+
+#define MLX5E_TX_CQ_POLL_BUDGET 128
+#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
+
+static const char vport_strings[][ETH_GSTRING_LEN] = {
+ /* vport statistics */
+ "rx_packets",
+ "rx_bytes",
+ "tx_packets",
+ "tx_bytes",
+ "rx_error_packets",
+ "rx_error_bytes",
+ "tx_error_packets",
+ "tx_error_bytes",
+ "rx_unicast_packets",
+ "rx_unicast_bytes",
+ "tx_unicast_packets",
+ "tx_unicast_bytes",
+ "rx_multicast_packets",
+ "rx_multicast_bytes",
+ "tx_multicast_packets",
+ "tx_multicast_bytes",
+ "rx_broadcast_packets",
+ "rx_broadcast_bytes",
+ "tx_broadcast_packets",
+ "tx_broadcast_bytes",
+
+ /* SW counters */
+ "tso_packets",
+ "tso_bytes",
+ "lro_packets",
+ "lro_bytes",
+ "rx_csum_good",
+ "rx_csum_none",
+ "tx_csum_offload",
+ "tx_queue_stopped",
+ "tx_queue_wake",
+ "tx_queue_dropped",
+ "rx_wqe_err",
+};
+
+struct mlx5e_vport_stats {
+ /* HW counters */
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_error_packets;
+ u64 rx_error_bytes;
+ u64 tx_error_packets;
+ u64 tx_error_bytes;
+ u64 rx_unicast_packets;
+ u64 rx_unicast_bytes;
+ u64 tx_unicast_packets;
+ u64 tx_unicast_bytes;
+ u64 rx_multicast_packets;
+ u64 rx_multicast_bytes;
+ u64 tx_multicast_packets;
+ u64 tx_multicast_bytes;
+ u64 rx_broadcast_packets;
+ u64 rx_broadcast_bytes;
+ u64 tx_broadcast_packets;
+ u64 tx_broadcast_bytes;
+
+ /* SW counters */
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 rx_csum_good;
+ u64 rx_csum_none;
+ u64 tx_csum_offload;
+ u64 tx_queue_stopped;
+ u64 tx_queue_wake;
+ u64 tx_queue_dropped;
+ u64 rx_wqe_err;
+
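+ /* 20 HW counters + 11 SW counters */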
+#define NUM_VPORT_COUNTERS 31
+};
+
+static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "csum_none",
+ "lro_packets",
+ "lro_bytes",
+ "wqe_err"
+};
+
+struct mlx5e_rq_stats {
+ u64 packets;
+ u64 csum_none;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 wqe_err;
+#define NUM_RQ_STATS 5
+};
+
+static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "tso_packets",
+ "tso_bytes",
+ "csum_offload_none",
+ "stopped",
+ "wake",
+ "dropped",
+ "nop"
+};
+
+struct mlx5e_sq_stats {
+ u64 packets;
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 csum_offload_none;
+ u64 stopped;
+ u64 wake;
+ u64 dropped;
+ u64 nop;
+#define NUM_SQ_STATS 8
+};
+
+struct mlx5e_stats {
+ struct mlx5e_vport_stats vport;
+};
+
+struct mlx5e_params {
+ u8 log_sq_size;
+ u8 log_rq_size;
+ u16 num_channels;
+ u8 default_vlan_prio;
+ u8 num_tc;
+ u16 rx_cq_moderation_usec;
+ u16 rx_cq_moderation_pkts;
+ u16 tx_cq_moderation_usec;
+ u16 tx_cq_moderation_pkts;
+ u16 min_rx_wqes;
+ u16 rx_hash_log_tbl_sz;
+ bool lro_en;
+ u32 lro_wqe_sz;
+};
+
+enum {
+ MLX5E_RQ_STATE_POST_WQES_ENABLE,
+};
+
+enum cq_flags {
+ MLX5E_CQ_HAS_CQES = 1,
+};
+
+struct mlx5e_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+ void *sqrq;
+ unsigned long flags;
+
+ /* data path - accessed per napi poll */
+ struct napi_struct *napi;
+ struct mlx5_core_cq mcq;
+ struct mlx5e_channel *channel;
+
+ /* control */
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_rq {
+ /* data path */
+ struct mlx5_wq_ll wq;
+ u32 wqe_sz;
+ struct sk_buff **skb;
+
+ struct device *pdev;
+ struct net_device *netdev;
+ struct mlx5e_rq_stats stats;
+ struct mlx5e_cq cq;
+
+ unsigned long state;
+ int ix;
+
+ /* control */
+ struct mlx5_wq_ctrl wq_ctrl;
+ u32 rqn;
+ struct mlx5e_channel *channel;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_tx_skb_cb {
+ u32 num_bytes;
+ u8 num_wqebbs;
+ u8 num_dma;
+};
+
+#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
+
+struct mlx5e_sq_dma {
+ dma_addr_t addr;
+ u32 size;
+};
+
+enum {
+ MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+};
+
+struct mlx5e_sq {
+ /* data path */
+
+ /* dirtied @completion */
+ u16 cc;
+ u32 dma_fifo_cc;
+
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ u32 dma_fifo_pc;
+ u32 bf_offset;
+ struct mlx5e_sq_stats stats;
+
+ struct mlx5e_cq cq;
+
+ /* pointers to per packet info: write@xmit, read@completion */
+ struct sk_buff **skb;
+ struct mlx5e_sq_dma *dma_fifo;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ u32 dma_fifo_mask;
+ void __iomem *uar_map;
+ struct netdev_queue *txq;
+ u32 sqn;
+ u32 bf_buf_size;
+ u16 max_inline;
+ u16 edge;
+ struct device *pdev;
+ __be32 mkey_be;
+ unsigned long state;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_uar uar;
+ struct mlx5e_channel *channel;
+ int tc;
+} ____cacheline_aligned_in_smp;
+
+static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
+{
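+ /* pc (producer) and cc (consumer) are free running u16 counters and
+ * the queue size is a power of two, so masking their difference with
+ * sz_m1 gives the number of free slots: e.g. size 8 (sz_m1 = 7),
+ * pc = 5, cc = 2 -> (2 - 5) & 7 = 5 free. A completely empty ring
+ * (cc == pc) would yield 0 from the mask, hence the second test.
+ */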
+ return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
+ (sq->cc == sq->pc));
+}
+
+enum channel_flags {
+ MLX5E_CHANNEL_NAPI_SCHED = 1,
+};
+
+struct mlx5e_channel {
+ /* data path */
+ struct mlx5e_rq rq;
+ struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
+ struct napi_struct napi;
+ struct device *pdev;
+ struct net_device *netdev;
+ __be32 mkey_be;
+ u8 num_tc;
+ unsigned long flags;
+
+ /* control */
+ struct mlx5e_priv *priv;
+ int ix;
+ int cpu;
+};
+
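+/* traffic types used to spread receive flows over per-type TIRs
+ * (priv->tirn[]); the flow table rules in en_flow_table.c steer
+ * matching packets to the corresponding TIR
+ */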
+enum mlx5e_traffic_types {
+ MLX5E_TT_IPV4_TCP = 0,
+ MLX5E_TT_IPV6_TCP = 1,
+ MLX5E_TT_IPV4_UDP = 2,
+ MLX5E_TT_IPV6_UDP = 3,
+ MLX5E_TT_IPV4 = 4,
+ MLX5E_TT_IPV6 = 5,
+ MLX5E_TT_ANY = 6,
+ MLX5E_NUM_TT = 7,
+};
+
+enum {
+ MLX5E_RQT_SPREADING = 0,
+ MLX5E_RQT_DEFAULT_RQ = 1,
+ MLX5E_NUM_RQT = 2,
+};
+
+struct mlx5e_eth_addr_info {
+ u8 addr[ETH_ALEN + 2];
+ u32 tt_vec;
+ u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+};
+
+#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
+
+struct mlx5e_eth_addr_db {
+ struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct mlx5e_eth_addr_info broadcast;
+ struct mlx5e_eth_addr_info allmulti;
+ struct mlx5e_eth_addr_info promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
+};
+
+enum {
+ MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+ MLX5E_STATE_OPENED,
+};
+
+struct mlx5e_vlan_db {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u32 active_vlans_ft_ix[VLAN_N_VID];
+ u32 untagged_rule_ft_ix;
+ u32 any_vlan_rule_ft_ix;
+ bool filter_disabled;
+};
+
+struct mlx5e_flow_table {
+ void *vlan;
+ void *main;
+};
+
+struct mlx5e_priv {
+ /* priv data path fields - start */
+ int order_base_2_num_channels;
+ int queue_mapping_channel_mask;
+ int num_tc;
+ int default_vlan_prio;
+ /* priv data path fields - end */
+
+ unsigned long state;
+ struct mutex state_lock; /* protects interface state */
+ struct mlx5_uar cq_uar;
+ u32 pdn;
+ u32 tdn;
+ struct mlx5_core_mr mr;
+
+ struct mlx5e_channel **channel;
+ u32 tisn[MLX5E_MAX_NUM_TC];
+ u32 rqtn;
+ u32 tirn[MLX5E_NUM_TT];
+
+ struct mlx5e_flow_table ft;
+ struct mlx5e_eth_addr_db eth_addr;
+ struct mlx5e_vlan_db vlan;
+
+ struct mlx5e_params params;
+ spinlock_t async_events_spinlock; /* sync hw events */
+ struct work_struct update_carrier_work;
+ struct work_struct set_rx_mode_work;
+ struct delayed_work update_stats_work;
+
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_stats stats;
+};
+
+#define MLX5E_NET_IP_ALIGN 2
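+/* shift packet data by two bytes so the IP header that follows the
+ * 14 byte Ethernet header is 4 byte aligned (same idea as the generic
+ * NET_IP_ALIGN)
+ */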
+
+struct mlx5e_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_eth_seg eth;
+};
+
+struct mlx5e_rx_wqe {
+ struct mlx5_wqe_srq_next_seg next;
+ struct mlx5_wqe_data_seg data;
+};
+
+enum mlx5e_link_mode {
+ MLX5E_1000BASE_CX_SGMII = 0,
+ MLX5E_1000BASE_KX = 1,
+ MLX5E_10GBASE_CX4 = 2,
+ MLX5E_10GBASE_KX4 = 3,
+ MLX5E_10GBASE_KR = 4,
+ MLX5E_20GBASE_KR2 = 5,
+ MLX5E_40GBASE_CR4 = 6,
+ MLX5E_40GBASE_KR4 = 7,
+ MLX5E_56GBASE_R4 = 8,
+ MLX5E_10GBASE_CR = 12,
+ MLX5E_10GBASE_SR = 13,
+ MLX5E_10GBASE_ER = 14,
+ MLX5E_40GBASE_SR4 = 15,
+ MLX5E_40GBASE_LR4 = 16,
+ MLX5E_100GBASE_CR4 = 20,
+ MLX5E_100GBASE_SR4 = 21,
+ MLX5E_100GBASE_KR4 = 22,
+ MLX5E_100GBASE_LR4 = 23,
+ MLX5E_100BASE_TX = 24,
+ MLX5E_100BASE_T = 25,
+ MLX5E_10GBASE_T = 26,
+ MLX5E_25GBASE_CR = 27,
+ MLX5E_25GBASE_KR = 28,
+ MLX5E_25GBASE_SR = 29,
+ MLX5E_50GBASE_CR2 = 30,
+ MLX5E_50GBASE_KR2 = 31,
+ MLX5E_LINK_MODES_NUMBER,
+};
+
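+/* the PTYS eth_proto_* fields are 32-bit masks indexed by the enum
+ * values above; the numbering gaps (9-11, 17-19) are protocol bits the
+ * driver does not use
+ */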
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev);
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq);
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+
+void mlx5e_update_stats(struct mlx5e_priv *priv);
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv);
+void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_work(struct work_struct *work);
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid);
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid);
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_open_locked(struct net_device *netdev);
+int mlx5e_close_locked(struct net_device *netdev);
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params);
+
+static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
+ struct mlx5e_tx_wqe *wqe)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ *sq->wq.db = cpu_to_be32(sq->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)&wqe->ctrl,
+ sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
+ NULL);
+
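+ /* alternate between the two halves of the BlueFlame buffer */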
+ sq->bf_offset ^= sq->bf_buf_size;
+}
+
+static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
+{
+ struct mlx5_core_cq *mcq;
+
+ mcq = &cq->mcq;
+ mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
+}
+
+extern const struct ethtool_ops mlx5e_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
new file mode 100644
index 000000000000..388938482ff9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+static void mlx5e_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+ strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static const struct {
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = {
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ [MLX5E_1000BASE_KX] = {
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ [MLX5E_10GBASE_CX4] = {
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_KX4] = {
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_KR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_20GBASE_KR2] = {
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ [MLX5E_40GBASE_CR4] = {
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_40GBASE_KR4] = {
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_56GBASE_R4] = {
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ [MLX5E_10GBASE_CR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_SR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_ER] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_40GBASE_SR4] = {
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_40GBASE_LR4] = {
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_100GBASE_CR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_SR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_KR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_LR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100BASE_TX] = {
+ .speed = 100,
+ },
+ [MLX5E_100BASE_T] = {
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ [MLX5E_10GBASE_T] = {
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 10000,
+ },
+ [MLX5E_25GBASE_CR] = {
+ .speed = 25000,
+ },
+ [MLX5E_25GBASE_KR] = {
+ .speed = 25000,
+ },
+ [MLX5E_25GBASE_SR] = {
+ .speed = 25000,
+ },
+ [MLX5E_50GBASE_CR2] = {
+ .speed = 50000,
+ },
+ [MLX5E_50GBASE_KR2] = {
+ .speed = 50000,
+ },
+};
+
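+/* the ETH_SS_STATS count must match the strings laid out by
+ * mlx5e_get_strings() and the values written by
+ * mlx5e_get_ethtool_stats()
+ */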
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return NUM_VPORT_COUNTERS +
+ priv->params.num_channels * NUM_RQ_STATS +
+ priv->params.num_channels * priv->num_tc *
+ NUM_SQ_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlx5e_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ int i, j, tc, idx = 0;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ break;
+
+ case ETH_SS_TEST:
+ break;
+
+ case ETH_SS_STATS:
+ /* VPORT counters */
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_strings[i]);
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ "rx%d_%s", i, rq_stats_strings[j]);
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->num_tc; tc++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data +
+ (idx++) * ETH_GSTRING_LEN,
+ "tx%d_%d_%s", i, tc,
+ sq_stats_strings[j]);
+ break;
+ }
+}
+
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int i, j, tc, idx = 0;
+
+ if (!data)
+ return;
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_stats(priv);
+ mutex_unlock(&priv->state_lock);
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ data[idx++] = ((u64 *)&priv->stats.vport)[i];
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+ &priv->state) ? 0 :
+ ((u64 *)&priv->channel[i]->rq.stats)[j];
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->num_tc; tc++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+ &priv->state) ? 0 :
+ ((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+}
+
+static void mlx5e_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
+ param->rx_pending = 1 << priv->params.log_rq_size;
+ param->tx_pending = 1 << priv->params.log_sq_size;
+}
+
+static int mlx5e_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_params new_params;
+ u16 min_rx_wqes;
+ u8 log_rq_size;
+ u8 log_sq_size;
+ int err = 0;
+
+ if (param->rx_jumbo_pending) {
+ netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (param->rx_mini_pending) {
+ netdev_info(dev, "%s: rx_mini_pending not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+ netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+ __func__, param->rx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+ netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+ __func__, param->rx_pending,
+ 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+ netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+ __func__, param->tx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
+ netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+ __func__, param->tx_pending,
+ 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
+ return -EINVAL;
+ }
+
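+ /* ring sizes are rounded up to the next power of two, e.g.
+ * rx_pending = 1000 -> log_rq_size = 10 (1024 entries)
+ */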
+ log_rq_size = order_base_2(param->rx_pending);
+ log_sq_size = order_base_2(param->tx_pending);
+ min_rx_wqes = min_t(u16, param->rx_pending - 1,
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+
+ if (log_rq_size == priv->params.log_rq_size &&
+ log_sq_size == priv->params.log_sq_size &&
+ min_rx_wqes == priv->params.min_rx_wqes)
+ return 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+ new_params.log_rq_size = log_rq_size;
+ new_params.log_sq_size = log_sq_size;
+ new_params.min_rx_wqes = min_rx_wqes;
+ err = mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static void mlx5e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+
+ ch->max_combined = ncv;
+ ch->combined_count = priv->params.num_channels;
+}
+
+static int mlx5e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+ unsigned int count = ch->combined_count;
+ struct mlx5e_params new_params;
+ int err = 0;
+
+ if (!count) {
+ netdev_info(dev, "%s: combined_count=0 not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (ch->rx_count || ch->tx_count) {
+ netdev_info(dev, "%s: separate rx/tx count not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (count > ncv) {
+ netdev_info(dev, "%s: count (%d) > max (%d)\n",
+ __func__, count, ncv);
+ return -EINVAL;
+ }
+
+ if (priv->params.num_channels == count)
+ return 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+ new_params.num_channels = count;
+ err = mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static int mlx5e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
+ coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
+ coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
+ coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts;
+
+ return 0;
+}
+
+static int mlx5e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channel *c;
+ int tc;
+ int i;
+
+ mutex_lock(&priv->state_lock);
+
+ priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
+ priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
+ priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
+ priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+
+ /* priv->channel[] is only valid while the interface is open */
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ for (i = 0; i < priv->params.num_channels; ++i) {
+ c = priv->channel[i];
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ mlx5_core_modify_cq_moderation(mdev,
+ &c->sq[tc].cq.mcq,
+ coal->tx_coalesce_usecs,
+ coal->tx_max_coalesced_frames);
+ }
+
+ mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+ coal->rx_coalesce_usecs,
+ coal->rx_max_coalesced_frames);
+ }
+
+out:
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
+{
+ int i;
+ u32 supported_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_cap & MLX5E_PROT_MASK(i))
+ supported_modes |= ptys2ethtool_table[i].supported;
+ }
+ return supported_modes;
+}
+
+static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
+{
+ int i;
+ u32 advertising_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_cap & MLX5E_PROT_MASK(i))
+ advertising_modes |= ptys2ethtool_table[i].advertised;
+ }
+ return advertising_modes;
+}
+
+static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
+{
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ return SUPPORTED_FIBRE;
+ }
+
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
+ return SUPPORTED_Backplane;
+ }
+ return 0;
+}
+
+static void get_speed_duplex(struct net_device *netdev,
+ u32 eth_proto_oper,
+ struct ethtool_cmd *cmd)
+{
+ int i;
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+
+ if (!netif_carrier_ok(netdev))
+ goto out;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
+ speed = ptys2ethtool_table[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static void get_supported(u32 eth_proto_cap, u32 *supported)
+{
+ *supported |= ptys2ethtool_supported_port(eth_proto_cap);
+ *supported |= ptys2ethtool_supported_link(eth_proto_cap);
+ *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+}
+
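+/* Pause is advertised when tx flow control is on; Asym_Pause is
+ * advertised when the rx and tx flow control settings differ
+ */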
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
+ u8 rx_pause, u32 *advertising)
+{
+ *advertising |= ptys2ethtool_adver_link(eth_proto_cap);
+ *advertising |= tx_pause ? ADVERTISED_Pause : 0;
+ *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
+}
+
+static u8 get_connector_port(u32 eth_proto)
+{
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ return PORT_FIBRE;
+ }
+
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
+ return PORT_DA;
+ }
+
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
+ return PORT_NONE;
+ }
+
+ return PORT_OTHER;
+}
+
+static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
+{
+ *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
+}
+
+static int mlx5e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_lp;
+ u32 eth_proto_oper;
+ int err;
+
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
+
+ if (err) {
+ netdev_err(netdev, "%s: query port ptys failed: %d\n",
+ __func__, err);
+ goto err_query_ptys;
+ }
+
+ eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+
+ cmd->supported = 0;
+ cmd->advertising = 0;
+
+ get_supported(eth_proto_cap, &cmd->supported);
+ get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
+ get_speed_duplex(netdev, eth_proto_oper, cmd);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+
+ cmd->port = get_connector_port(eth_proto_oper);
+ get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
+
+ cmd->transceiver = XCVR_INTERNAL;
+
+err_query_ptys:
+ return err;
+}
+
+static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
+{
+ u32 i, ptys_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (ptys2ethtool_table[i].advertised & link_modes)
+ ptys_modes |= MLX5E_PROT_MASK(i);
+ }
+
+ return ptys_modes;
+}
+
+static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
+{
+ u32 i, speed_links = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (ptys2ethtool_table[i].speed == speed)
+ speed_links |= MLX5E_PROT_MASK(i);
+ }
+
+ return speed_links;
+}
+
+static int mlx5e_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 link_modes;
+ u32 speed;
+ u32 eth_proto_cap, eth_proto_admin;
+ u8 port_status;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ link_modes = cmd->autoneg == AUTONEG_ENABLE ?
+ mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+ mlx5e_ethtool2ptys_speed_link(speed);
+
+ err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ link_modes = link_modes & eth_proto_cap;
+ if (!link_modes) {
+ netdev_err(netdev, "%s: unsupported link mode(s) requested\n",
+ __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ if (link_modes == eth_proto_admin)
+ goto out;
+
+ err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = mlx5_query_port_status(mdev, &port_status);
+ if (err)
+ goto out;
+
+ if (port_status == MLX5_PORT_DOWN)
+ return 0;
+
+ err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
+ if (err)
+ goto out;
+ err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
+out:
+ return err;
+}
+
+const struct ethtool_ops mlx5e_ethtool_ops = {
+ .get_drvinfo = mlx5e_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx5e_get_strings,
+ .get_sset_count = mlx5e_get_sset_count,
+ .get_ethtool_stats = mlx5e_get_ethtool_stats,
+ .get_ringparam = mlx5e_get_ringparam,
+ .set_ringparam = mlx5e_set_ringparam,
+ .get_channels = mlx5e_get_channels,
+ .set_channels = mlx5e_set_channels,
+ .get_coalesce = mlx5e_get_coalesce,
+ .set_coalesce = mlx5e_set_coalesce,
+ .get_settings = mlx5e_get_settings,
+ .set_settings = mlx5e_set_settings,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
new file mode 100644
index 000000000000..120db80c47aa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -0,0 +1,860 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+enum {
+ MLX5E_FULLMATCH = 0,
+ MLX5E_ALLMULTI = 1,
+ MLX5E_PROMISC = 2,
+};
+
+enum {
+ MLX5E_UC = 0,
+ MLX5E_MC_IPV4 = 1,
+ MLX5E_MC_IPV6 = 2,
+ MLX5E_MC_OTHER = 3,
+};
+
+enum {
+ MLX5E_ACTION_NONE = 0,
+ MLX5E_ACTION_ADD = 1,
+ MLX5E_ACTION_DEL = 2,
+};
+
+struct mlx5e_eth_addr_hash_node {
+ struct hlist_node hlist;
+ u8 action;
+ struct mlx5e_eth_addr_info ai;
+};
+
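+/* MAC addresses are hashed by their least significant byte into the
+ * 256 (MLX5E_ETH_ADDR_HASH_SIZE) buckets of the netdev_uc/netdev_mc
+ * tables
+ */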
+static inline int mlx5e_hash_eth_addr(u8 *addr)
+{
+ return addr[5];
+}
+
+static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ int ix = mlx5e_hash_eth_addr(addr);
+ int found = 0;
+
+ hlist_for_each_entry(hn, &hash[ix], hlist)
+ if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
+ found = 1;
+ break;
+ }
+
+ if (found) {
+ hn->action = MLX5E_ACTION_NONE;
+ return;
+ }
+
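+ /* GFP_ATOMIC: may be called under netif_addr_lock_bh(), see
+ * mlx5e_sync_netdev_addr()
+ */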
+ hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
+ if (!hn)
+ return;
+
+ ether_addr_copy(hn->ai.addr, addr);
+ hn->action = MLX5E_ACTION_ADD;
+
+ hlist_add_head(&hn->hlist, &hash[ix]);
+}
+
+static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+{
+ hlist_del(&hn->hlist);
+ kfree(hn);
+}
+
+static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai)
+{
+ void *ft = priv->ft.main;
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
+}
+
+static int mlx5e_get_eth_addr_type(u8 *addr)
+{
+ if (is_unicast_ether_addr(addr))
+ return MLX5E_UC;
+
+ if ((addr[0] == 0x01) &&
+ (addr[1] == 0x00) &&
+ (addr[2] == 0x5e) &&
+ !(addr[3] & 0x80))
+ return MLX5E_MC_IPV4;
+
+ if ((addr[0] == 0x33) &&
+ (addr[1] == 0x33))
+ return MLX5E_MC_IPV6;
+
+ return MLX5E_MC_OTHER;
+}
+
+static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
+{
+ int eth_addr_type;
+ u32 ret;
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
+ switch (eth_addr_type) {
+ case MLX5E_UC:
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV4:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV6:
+ ret =
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV6) |
+ 0;
+ break;
+
+ case MLX5E_MC_OTHER:
+ ret =
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+
+ break;
+
+ case MLX5E_ALLMULTI:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ default: /* MLX5E_PROMISC */
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+
+ return ret;
+}
+
+static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type,
+ void *flow_context, void *match_criteria)
+{
+ u8 match_criteria_enable = 0;
+ void *match_value;
+ void *dest;
+ u8 *dmac;
+ u8 *match_criteria_dmac;
+ void *ft = priv->ft.main;
+ u32 *tirn = priv->tirn;
+ u32 tt_vec;
+ int err;
+
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dmac_47_16);
+ match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ memset(match_criteria_dmac, 0xff, ETH_ALEN);
+ ether_addr_copy(dmac, ai->addr);
+ break;
+
+ case MLX5E_ALLMULTI:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ match_criteria_dmac[0] = 0x01;
+ dmac[0] = 0x01;
+ break;
+
+ case MLX5E_PROMISC:
+ break;
+ }
+
+ tt_vec = mlx5e_get_tt_vec(ai, type);
+
+ if (tt_vec & (1 << MLX5E_TT_ANY)) {
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_ANY]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_ANY]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_ANY);
+ }
+
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ethertype);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_UDP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_TCP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+ }
+
+ return 0;
+}
+
+static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type)
+{
+ u32 *flow_context;
+ u32 *match_criteria;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_eth_addr_rule_out;
+ }
+
+ err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
+ match_criteria);
+ if (err)
+ netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_eth_addr_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return err;
+}
+
+enum mlx5e_vlan_rule_type {
+ MLX5E_VLAN_RULE_TYPE_UNTAGGED,
+ MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+};
+
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ u8 match_criteria_enable = 0;
+ u32 *flow_context;
+ void *match_value;
+ void *dest;
+ u32 *match_criteria;
+ u32 *ft_ix;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_vlan_rule_out;
+ }
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ mlx5_get_flow_table_id(priv->ft.main));
+
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.vlan_tag);
+
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ ft_ix = &priv->vlan.untagged_rule_ft_ix;
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
+ MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ 1);
+ break;
+ default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
+ ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
+ MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ 1);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
+ vid);
+ break;
+ }
+
+ err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
+ match_criteria, flow_context, ft_ix);
+ if (err)
+ netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_vlan_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return err;
+}
+
+static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.untagged_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.any_vlan_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.active_vlans_ft_ix[vid]);
+ break;
+ }
+}
+
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ if (priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = false;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ if (!priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = true;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ set_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ vid);
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mutex_lock(&priv->state_lock);
+
+ clear_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+ int err;
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ vid);
+ if (err)
+ return err;
+ }
+
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ return err;
+
+ if (priv->vlan.filter_disabled) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+
+ if (priv->vlan.filter_disabled)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+}
+
+#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
+ for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+ hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
+
+static void mlx5e_execute_action(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_hash_node *hn)
+{
+ switch (hn->action) {
+ case MLX5E_ACTION_ADD:
+ mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ hn->action = MLX5E_ACTION_NONE;
+ break;
+
+ case MLX5E_ACTION_DEL:
+ mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
+ mlx5e_del_eth_addr_from_hash(hn);
+ break;
+ }
+}
+
+static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(netdev);
+
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
+ priv->netdev->dev_addr);
+
+ netdev_for_each_uc_addr(ha, netdev)
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+
+ netdev_for_each_mc_addr(ha, netdev)
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+
+ netif_addr_unlock_bh(netdev);
+}
+
+static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct hlist_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ mlx5e_execute_action(priv, hn);
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ mlx5e_execute_action(priv, hn);
+}
+
+static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct hlist_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ hn->action = MLX5E_ACTION_DEL;
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ hn->action = MLX5E_ACTION_DEL;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_sync_netdev_addr(priv);
+
+ mlx5e_apply_netdev_addr(priv);
+}
+
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct net_device *ndev = priv->netdev;
+
+ bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
+ bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+ bool broadcast_enabled = rx_mode_enable;
+
+ bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
+ bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
+ bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
+ bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
+ bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
+ bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
+
+ if (enable_promisc)
+ mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+ if (enable_allmulti)
+ mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ if (enable_broadcast)
+ mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+
+ mlx5e_handle_netdev_addr(priv);
+
+ if (disable_broadcast)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+ if (disable_allmulti)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+ if (disable_promisc)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+
+ ea->promisc_enabled = promisc_enabled;
+ ea->allmulti_enabled = allmulti_enabled;
+ ea->broadcast_enabled = broadcast_enabled;
+}
+
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_set_rx_mode_core(priv);
+ mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+{
+ ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+}
+
+static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+ u8 *dmac;
+
+ g = kcalloc(9, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
+ g[0].log_sz = 2;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.ethertype);
+
+ g[2].log_sz = 0;
+
+ g[3].log_sz = 14;
+ g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[4].log_sz = 13;
+ g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
+ outer_headers.ethertype);
+
+ g[5].log_sz = 11;
+ g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+
+ g[6].log_sz = 2;
+ g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[7].log_sz = 1;
+ g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
+ outer_headers.ethertype);
+
+ g[8].log_sz = 0;
+ g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 9, g);
+ kfree(g);
+
+ return priv->ft.main ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.main);
+}
+
+static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+
+ g = kcalloc(2, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
+ g[0].log_sz = 12;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.first_vid);
+
+ /* untagged + any vlan id */
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.vlan_tag);
+
+ priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 2, g);
+
+ kfree(g);
+ return priv->ft.vlan ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.vlan);
+}
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_create_main_flow_table(priv);
+ if (err)
+ return err;
+
+ err = mlx5e_create_vlan_flow_table(priv);
+ if (err)
+ goto err_destroy_main_flow_table;
+
+ return 0;
+
+err_destroy_main_flow_table:
+ mlx5e_destroy_main_flow_table(priv);
+
+ return err;
+}
+
+void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5e_destroy_vlan_flow_table(priv);
+ mlx5e_destroy_main_flow_table(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
new file mode 100644
index 000000000000..9a48d8eac0fc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -0,0 +1,1906 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+struct mlx5e_rq_param {
+ u32 rqc[MLX5_ST_SZ_DW(rqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_sq_param {
+ u32 sqc[MLX5_ST_SZ_DW(sqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_cq_param {
+ u32 cqc[MLX5_ST_SZ_DW(cqc)];
+ struct mlx5_wq_param wq;
+ u16 eq_ix;
+};
+
+struct mlx5e_channel_param {
+ struct mlx5e_rq_param rq;
+ struct mlx5e_sq_param sq;
+ struct mlx5e_cq_param rx_cq;
+ struct mlx5e_cq_param tx_cq;
+};
+
+static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 port_state;
+
+ port_state = mlx5_query_vport_state(mdev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
+
+ if (port_state == VPORT_STATE_UP)
+ netif_carrier_on(priv->netdev);
+ else
+ netif_carrier_off(priv->netdev);
+}
+
+static void mlx5e_update_carrier_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ update_carrier_work);
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_carrier(priv);
+ mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_vport_stats *s = &priv->stats.vport;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u64 tx_offload_none;
+ int i, j;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return;
+
+ /* Collect the SW counters first, then the HW counters, for consistency */
+ s->tso_packets = 0;
+ s->tso_bytes = 0;
+ s->tx_queue_stopped = 0;
+ s->tx_queue_wake = 0;
+ s->tx_queue_dropped = 0;
+ tx_offload_none = 0;
+ s->lro_packets = 0;
+ s->lro_bytes = 0;
+ s->rx_csum_none = 0;
+ s->rx_wqe_err = 0;
+ for (i = 0; i < priv->params.num_channels; i++) {
+ rq_stats = &priv->channel[i]->rq.stats;
+
+ s->lro_packets += rq_stats->lro_packets;
+ s->lro_bytes += rq_stats->lro_bytes;
+ s->rx_csum_none += rq_stats->csum_none;
+ s->rx_wqe_err += rq_stats->wqe_err;
+
+ for (j = 0; j < priv->num_tc; j++) {
+ sq_stats = &priv->channel[i]->sq[j].stats;
+
+ s->tso_packets += sq_stats->tso_packets;
+ s->tso_bytes += sq_stats->tso_bytes;
+ s->tx_queue_stopped += sq_stats->stopped;
+ s->tx_queue_wake += sq_stats->wake;
+ s->tx_queue_dropped += sq_stats->dropped;
+ tx_offload_none += sq_stats->csum_offload_none;
+ }
+ }
+
+ /* HW counters */
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_vport_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+ MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+
+ memset(out, 0, outlen);
+
+ if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+ goto free_out;
+
+#define MLX5_GET_CTR(p, x) \
+ MLX5_GET64(query_vport_counter_out, p, x)
+
+ s->rx_error_packets =
+ MLX5_GET_CTR(out, received_errors.packets);
+ s->rx_error_bytes =
+ MLX5_GET_CTR(out, received_errors.octets);
+ s->tx_error_packets =
+ MLX5_GET_CTR(out, transmit_errors.packets);
+ s->tx_error_bytes =
+ MLX5_GET_CTR(out, transmit_errors.octets);
+
+ s->rx_unicast_packets =
+ MLX5_GET_CTR(out, received_eth_unicast.packets);
+ s->rx_unicast_bytes =
+ MLX5_GET_CTR(out, received_eth_unicast.octets);
+ s->tx_unicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
+ s->tx_unicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
+
+ s->rx_multicast_packets =
+ MLX5_GET_CTR(out, received_eth_multicast.packets);
+ s->rx_multicast_bytes =
+ MLX5_GET_CTR(out, received_eth_multicast.octets);
+ s->tx_multicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
+ s->tx_multicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
+
+ s->rx_broadcast_packets =
+ MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ s->rx_broadcast_bytes =
+ MLX5_GET_CTR(out, received_eth_broadcast.octets);
+ s->tx_broadcast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+ s->tx_broadcast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+ s->rx_packets =
+ s->rx_unicast_packets +
+ s->rx_multicast_packets +
+ s->rx_broadcast_packets;
+ s->rx_bytes =
+ s->rx_unicast_bytes +
+ s->rx_multicast_bytes +
+ s->rx_broadcast_bytes;
+ s->tx_packets =
+ s->tx_unicast_packets +
+ s->tx_multicast_packets +
+ s->tx_broadcast_packets;
+ s->tx_bytes =
+ s->tx_unicast_bytes +
+ s->tx_multicast_bytes +
+ s->tx_broadcast_bytes;
+
+ /* Update calculated offload counters */
+ s->tx_csum_offload = s->tx_packets - tx_offload_none;
+ s->rx_csum_good = s->rx_packets - s->rx_csum_none;
+
+free_out:
+ kvfree(out);
+}
+
+static void mlx5e_update_stats_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+ update_stats_work);
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_update_stats(priv);
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(
+ MLX5E_UPDATE_STATS_INTERVAL));
+ }
+ mutex_unlock(&priv->state_lock);
+}
+
+static void __mlx5e_async_event(struct mlx5e_priv *priv,
+ enum mlx5_dev_event event)
+{
+ switch (event) {
+ case MLX5_DEV_EVENT_PORT_UP:
+ case MLX5_DEV_EVENT_PORT_DOWN:
+ schedule_work(&priv->update_carrier_work);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+ enum mlx5_dev_event event, unsigned long param)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ spin_lock(&priv->async_events_spinlock);
+ if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ __mlx5e_async_event(priv, event);
+ spin_unlock(&priv->async_events_spinlock);
+}
+
+static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
+{
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+}
+
+static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
+{
+ spin_lock_irq(&priv->async_events_spinlock);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ spin_unlock_irq(&priv->async_events_spinlock);
+}
+
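+/*
+ * The HW MTU counts the whole L2 frame while the netdev MTU does not, so
+ * the conversions add/strip ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN (14 + 4 + 4
+ * = 22 bytes); e.g. a SW MTU of 1500 corresponds to a HW MTU of 1522.
+ */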
+#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
+static int mlx5e_create_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqc = param->rqc;
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int wq_sz;
+ int err;
+ int i;
+
+ err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ &rq->wq_ctrl);
+ if (err)
+ return err;
+
+ rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
+
+ wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!rq->skb) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+
+ rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
+ MLX5E_SW2HW_MTU(priv->netdev->mtu);
+ rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+
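+ /*
+ * Pre-program each RX WQE with the lkey and the usable byte count
+ * (buffer size minus the IP alignment headroom, tagged with
+ * MLX5_HW_START_PADDING); the buffer address is filled in later,
+ * when the WQE is posted.
+ */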
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+ u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
+
+ wqe->data.lkey = c->mkey_be;
+ wqe->data.byte_count =
+ cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+ }
+
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->channel = c;
+ rq->ix = c->ix;
+
+ return 0;
+
+err_rq_wq_destroy:
+ mlx5_wq_destroy(&rq->wq_ctrl);
+
+ return err;
+}
+
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+{
+ kfree(rq->skb);
+ mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+ sizeof(u64) * rq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ memcpy(rqc, param->rqc, sizeof(param->rqc));
+
+ MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&rq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_rq(mdev, rq->rqn);
+}
+
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_wq_ll *wq = &rq->wq;
+ int i;
+
+ for (i = 0; i < 1000; i++) {
+ if (wq->cur_sz >= priv->params.min_rx_wqes)
+ return 0;
+
+ msleep(20);
+ }
+
+ return -ETIMEDOUT;
+}
+
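+/*
+ * RQ bring-up is a create/enable/modify sequence: allocate the SW ring
+ * (create), instantiate the HW object in RST state (enable), move it
+ * RST -> RDY, and only then allow RX WQE posting.  The error path unwinds
+ * the same steps in reverse.
+ */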
+static int mlx5e_open_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ int err;
+
+ err = mlx5e_create_rq(c, param, rq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_rq(rq, param);
+ if (err)
+ goto err_destroy_rq;
+
+ err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ goto err_disable_rq;
+
+ set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
+
+ return 0;
+
+err_disable_rq:
+ mlx5e_disable_rq(rq);
+err_destroy_rq:
+ mlx5e_destroy_rq(rq);
+
+ return err;
+}
+
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+ clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+
+ mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+ while (!mlx5_wq_ll_is_empty(&rq->wq))
+ msleep(20);
+
+ /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
+ napi_synchronize(&rq->channel->napi);
+
+ mlx5e_disable_rq(rq);
+ mlx5e_destroy_rq(rq);
+}
+
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->dma_fifo);
+ kfree(sq->skb);
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
+ sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
+ numa);
+
+ if (!sq->skb || !sq->dma_fifo) {
+ mlx5e_free_sq_db(sq);
+ return -ENOMEM;
+ }
+
+ sq->dma_fifo_mask = df_sz - 1;
+
+ return 0;
+}
+
+static int mlx5e_create_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *sqc = param->sqc;
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ int err;
+
+ err = mlx5_alloc_map_uar(mdev, &sq->uar);
+ if (err)
+ return err;
+
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
+ &sq->wq_ctrl);
+ if (err)
+ goto err_unmap_free_uar;
+
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+ sq->uar_map = sq->uar.map;
+ sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
+ if (err)
+ goto err_sq_wq_destroy;
+
+ sq->txq = netdev_get_tx_queue(priv->netdev,
+ c->ix + tc * priv->params.num_channels);
+
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+ sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+
+ return 0;
+
+err_sq_wq_destroy:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &sq->uar);
+
+ return err;
+}
+
+static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+
+ mlx5e_free_sq_db(sq);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+}
+
+static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *sqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, param->sqc, sizeof(param->sqc));
+
+ MLX5_SET(sqc, sqc, user_index, sq->tc);
+ MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, sq->uar.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *sqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+}
+
+static int mlx5e_open_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ int err;
+
+ err = mlx5e_create_sq(c, tc, param, sq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_sq(sq, param);
+ if (err)
+ goto err_destroy_sq;
+
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+ if (err)
+ goto err_disable_sq;
+
+ set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+
+ return 0;
+
+err_disable_sq:
+ mlx5e_disable_sq(sq);
+err_destroy_sq:
+ mlx5e_destroy_sq(sq);
+
+ return err;
+}
+
+static inline void netif_tx_disable_queue(struct netdev_queue *txq)
+{
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+}
+
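+/*
+ * SQ teardown order matters: stop waking the txq and sync NAPI so nothing
+ * new is queued, flush with a final NOP so completions are generated for
+ * everything already posted, move the SQ to ERR, then wait for cc to catch
+ * up with pc before tearing down the HW object.
+ */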
+static void mlx5e_close_sq(struct mlx5e_sq *sq)
+{
+ clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
+ netif_tx_disable_queue(sq->txq);
+
+ /* ensure hw is notified of all pending wqes */
+ if (mlx5e_sq_has_room_for(sq, 1))
+ mlx5e_send_nop(sq, true);
+
+ mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ while (sq->cc != sq->pc) /* wait until sq is empty */
+ msleep(20);
+
+ /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
+ napi_synchronize(&sq->channel->napi);
+
+ mlx5e_disable_sq(sq);
+ mlx5e_destroy_sq(sq);
+}
+
+static int mlx5e_create_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int eqn_not_used;
+ int irqn;
+ int err;
+ u32 i;
+
+ param->wq.numa = cpu_to_node(c->cpu);
+ param->eq_ix = c->ix;
+
+ err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+ cq->napi = &c->napi;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ *mcq->set_ci_db = 0;
+ *mcq->arm_db = 0;
+ mcq->vector = param->eq_ix;
+ mcq->comp = mlx5e_completion_event;
+ mcq->event = mlx5e_cq_error_event;
+ mcq->irqn = irqn;
+ mcq->uar = &priv->cq_uar;
+
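+ /*
+ * Stamp every CQE with an invalid opcode and HW ownership (0xf1:
+ * opcode 0xf in the high nibble, owner bit set) so stale memory is
+ * never mistaken for a valid completion.
+ */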
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->channel = c;
+
+ return 0;
+}
+
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+{
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+
+ void *in;
+ void *cqc;
+ int inlen;
+ int irqn_not_used;
+ int eqn;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, param->cqc, sizeof(param->cqc));
+
+ mlx5_fill_page_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+
+ kvfree(in);
+
+ if (err)
+ return err;
+
+ mlx5e_cq_arm(cq);
+
+ return 0;
+}
+
+static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_cq(mdev, &cq->mcq);
+}
+
+static int mlx5e_open_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq,
+ u16 moderation_usecs,
+ u16 moderation_frames)
+{
+ int err;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ err = mlx5e_create_cq(c, param, cq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_cq(cq, param);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+ moderation_usecs,
+ moderation_frames);
+ if (err)
+ goto err_destroy_cq;
+
+ return 0;
+
+err_destroy_cq:
+ mlx5e_destroy_cq(cq);
+
+ return err;
+}
+
+static void mlx5e_close_cq(struct mlx5e_cq *cq)
+{
+ mlx5e_disable_cq(cq);
+ mlx5e_destroy_cq(cq);
+}
+
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+ return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
+static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ struct mlx5e_priv *priv = c->priv;
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
+ priv->params.tx_cq_moderation_usec,
+ priv->params.tx_cq_moderation_pkts);
+ if (err)
+ goto err_close_tx_cqs;
+
+ c->sq[tc].cq.sqrq = &c->sq[tc];
+ }
+
+ return 0;
+
+err_close_tx_cqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_cq(&c->sq[tc].cq);
+
+ return err;
+}
+
+static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_cq(&c->sq[tc].cq);
+}
+
+static int mlx5e_open_sqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+ if (err)
+ goto err_close_sqs;
+ }
+
+ return 0;
+
+err_close_sqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_sq(&c->sq[tc]);
+
+ return err;
+}
+
+static void mlx5e_close_sqs(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_sq(&c->sq[tc]);
+}
+
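+/*
+ * Channel bring-up order: CQs are created first, NAPI is enabled before
+ * the SQs/RQ so the very first completion can be polled, and the error
+ * path unwinds everything in exact reverse order.
+ */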
+static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_channel_param *cparam,
+ struct mlx5e_channel **cp)
+{
+ struct net_device *netdev = priv->netdev;
+ int cpu = mlx5e_get_cpu(priv, ix);
+ struct mlx5e_channel *c;
+ int err;
+
+ c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+ if (!c)
+ return -ENOMEM;
+
+ c->priv = priv;
+ c->ix = ix;
+ c->cpu = cpu;
+ c->pdev = &priv->mdev->pdev->dev;
+ c->netdev = priv->netdev;
+ c->mkey_be = cpu_to_be32(priv->mr.key);
+ c->num_tc = priv->num_tc;
+
+ netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+
+ err = mlx5e_open_tx_cqs(c, cparam);
+ if (err)
+ goto err_napi_del;
+
+ err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
+ priv->params.rx_cq_moderation_usec,
+ priv->params.rx_cq_moderation_pkts);
+ if (err)
+ goto err_close_tx_cqs;
+ c->rq.cq.sqrq = &c->rq;
+
+ napi_enable(&c->napi);
+
+ err = mlx5e_open_sqs(c, cparam);
+ if (err)
+ goto err_disable_napi;
+
+ err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+ if (err)
+ goto err_close_sqs;
+
+ netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
+ *cp = c;
+
+ return 0;
+
+err_close_sqs:
+ mlx5e_close_sqs(c);
+
+err_disable_napi:
+ napi_disable(&c->napi);
+ mlx5e_close_cq(&c->rq.cq);
+
+err_close_tx_cqs:
+ mlx5e_close_tx_cqs(c);
+
+err_napi_del:
+ netif_napi_del(&c->napi);
+ kfree(c);
+
+ return err;
+}
+
+static void mlx5e_close_channel(struct mlx5e_channel *c)
+{
+ mlx5e_close_rq(&c->rq);
+ mlx5e_close_sqs(c);
+ napi_disable(&c->napi);
+ mlx5e_close_cq(&c->rq.cq);
+ mlx5e_close_tx_cqs(c);
+ netif_napi_del(&c->napi);
+ kfree(c);
+}
+
+static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.linear = 1;
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+}
+
+static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+ struct mlx5e_channel_param *cparam)
+{
+ memset(cparam, 0, sizeof(*cparam));
+
+ mlx5e_build_rq_param(priv, &cparam->rq);
+ mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
+ mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+}
+
+static int mlx5e_open_channels(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channel_param cparam;
+ int err;
+ int i;
+ int j;
+
+ priv->channel = kcalloc(priv->params.num_channels,
+ sizeof(struct mlx5e_channel *), GFP_KERNEL);
+ if (!priv->channel)
+ return -ENOMEM;
+
+ mlx5e_build_channel_param(priv, &cparam);
+ for (i = 0; i < priv->params.num_channels; i++) {
+ err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+ if (err)
+ goto err_close_channels;
+ }
+
+ for (j = 0; j < priv->params.num_channels; j++) {
+ err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+ if (err)
+ goto err_close_channels;
+ }
+
+ return 0;
+
+err_close_channels:
+ for (i--; i >= 0; i--)
+ mlx5e_close_channel(priv->channel[i]);
+
+ kfree(priv->channel);
+
+ return err;
+}
+
+static void mlx5e_close_channels(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_close_channel(priv->channel[i]);
+
+ kfree(priv->channel);
+}
+
+static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(tisc, tisc, prio, tc);
+ MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+ return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+{
+ mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_open_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int err;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ err = mlx5e_open_tis(priv, tc);
+ if (err)
+ goto err_close_tises;
+ }
+
+ return 0;
+
+err_close_tises:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_tis(priv, tc);
+
+ return err;
+}
+
+static void mlx5e_close_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++)
+ mlx5e_close_tis(priv, tc);
+}
+
+static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ void *rqtc;
+ int inlen;
+ int err;
+ int sz;
+ int i;
+
+ sz = 1 << priv->params.rx_hash_log_tbl_sz;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
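+ /* spread the indirection table entries across channels round-robin */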
+ for (i = 0; i < sz; i++) {
+ int ix = i % priv->params.num_channels;
+
+ MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
+ }
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+ if (!err)
+ priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+
+ mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+ if (priv->params.lro_en) {
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (priv->params.lro_wqe_sz -
+ ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+ MLX5_CAP_ETH(priv->mdev,
+ lro_timer_supported_periods[3]));
+ }
+
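+ /*
+ * MLX5E_TT_ANY dispatches directly to channel 0's RQ; every other
+ * traffic type goes through the RQT with symmetric Toeplitz RSS.
+ */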
+ switch (tt) {
+ case MLX5E_TT_ANY:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn,
+ priv->channel[0]->rq.rqn);
+ break;
+ default:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn);
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
+ rx_hash_toeplitz_key),
+ MLX5_FLD_SZ_BYTES(tirc,
+ rx_hash_toeplitz_key));
+ break;
+ }
+
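+ /*
+ * Per traffic type hash fields: TCP/UDP types hash on the full
+ * 4-tuple (MLX5_HASH_ALL), plain IPv4/IPv6 on addresses only
+ * (MLX5_HASH_IP).
+ */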
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ }
+}
+
+static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ mlx5e_build_tir_ctx(priv, tirc, tt);
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+{
+ mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_open_tirs(struct mlx5e_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ err = mlx5e_open_tir(priv, i);
+ if (err)
+ goto err_close_tirs;
+ }
+
+ return 0;
+
+err_close_tirs:
+ for (i--; i >= 0; i--)
+ mlx5e_close_tir(priv, i);
+
+ return err;
+}
+
+static void mlx5e_close_tirs(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ mlx5e_close_tir(priv, i);
+}
+
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int hw_mtu;
+ int err;
+
+ err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+ if (err)
+ return err;
+
+ mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+ if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
+ netdev_warn(netdev, "%s: Port MTU %d is different from netdev mtu %d\n",
+ __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+
+ netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+ return 0;
+}
+
+int mlx5e_open_locked(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int num_txqs;
+ int err;
+
+ num_txqs = roundup_pow_of_two(priv->params.num_channels) *
+ priv->params.num_tc;
+ netif_set_real_num_tx_queues(netdev, num_txqs);
+ netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
+
+ err = mlx5e_set_dev_port_mtu(netdev);
+ if (err)
+ return err;
+
+ err = mlx5e_open_tises(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
+ __func__, err);
+ return err;
+ }
+
+ err = mlx5e_open_channels(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+ __func__, err);
+ goto err_close_tises;
+ }
+
+ err = mlx5e_open_rqt(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
+ __func__, err);
+ goto err_close_channels;
+ }
+
+ err = mlx5e_open_tirs(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
+ __func__, err);
+ goto err_close_rqt;
+ }
+
+ err = mlx5e_open_flow_table(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
+ __func__, err);
+ goto err_close_tirs;
+ }
+
+ err = mlx5e_add_all_vlan_rules(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
+ __func__, err);
+ goto err_close_flow_table;
+ }
+
+ mlx5e_init_eth_addr(priv);
+
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_update_carrier(priv);
+ mlx5e_set_rx_mode_core(priv);
+
+ schedule_delayed_work(&priv->update_stats_work, 0);
+ return 0;
+
+err_close_flow_table:
+ mlx5e_close_flow_table(priv);
+
+err_close_tirs:
+ mlx5e_close_tirs(priv);
+
+err_close_rqt:
+ mlx5e_close_rqt(priv);
+
+err_close_channels:
+ mlx5e_close_channels(priv);
+
+err_close_tises:
+ mlx5e_close_tises(priv);
+
+ return err;
+}
+
+static int mlx5e_open(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_open_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_close_locked(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_set_rx_mode_core(priv);
+ mlx5e_del_all_vlan_rules(priv);
+ netif_carrier_off(priv->netdev);
+ mlx5e_close_flow_table(priv);
+ mlx5e_close_tirs(priv);
+ mlx5e_close_rqt(priv);
+ mlx5e_close_channels(priv);
+ mlx5e_close_tises(priv);
+
+ return 0;
+}
+
+static int mlx5e_close(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_close_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params)
+{
+ int err = 0;
+ int was_opened;
+
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params = *new_params;
+
+ if (was_opened)
+ err = mlx5e_open_locked(priv->netdev);
+
+ return err;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
+ stats->rx_packets = vstats->rx_packets;
+ stats->rx_bytes = vstats->rx_bytes;
+ stats->tx_packets = vstats->tx_packets;
+ stats->tx_bytes = vstats->tx_bytes;
+ stats->multicast = vstats->rx_multicast_packets +
+ vstats->tx_multicast_packets;
+ stats->tx_errors = vstats->tx_error_packets;
+ stats->rx_errors = vstats->rx_error_packets;
+ stats->tx_dropped = vstats->tx_queue_dropped;
+ stats->rx_crc_errors = 0;
+ stats->rx_length_errors = 0;
+
+ return stats;
+}
+
+static void mlx5e_set_rx_mode(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ schedule_work(&priv->set_rx_mode_work);
+}
+
+static int mlx5e_set_mac(struct net_device *netdev, void *addr)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ netif_addr_lock_bh(netdev);
+ ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ netif_addr_unlock_bh(netdev);
+
+ schedule_work(&priv->set_rx_mode_work);
+
+ return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ netdev_features_t changes = features ^ netdev->features;
+ struct mlx5e_params new_params;
+ bool update_params = false;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+
+ if (changes & NETIF_F_LRO) {
+ new_params.lro_en = !!(features & NETIF_F_LRO);
+ update_params = true;
+ }
+
+ if (update_params)
+ mlx5e_update_priv_params(priv, &new_params);
+
+ if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ mlx5e_enable_vlan_filter(priv);
+ else
+ mlx5e_disable_vlan_filter(priv);
+ }
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int max_mtu;
+ int err;
+
+ mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+
+ if (new_mtu > max_mtu) {
+ netdev_err(netdev,
+ "%s: Bad MTU (%d) > (%d) Max\n",
+ __func__, new_mtu, max_mtu);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->state_lock);
+ netdev->mtu = new_mtu;
+ err = mlx5e_update_priv_params(priv, &priv->params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static struct net_device_ops mlx5e_netdev_ops = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
+ .ndo_start_xmit = mlx5e_xmit,
+ .ndo_get_stats64 = mlx5e_get_stats,
+ .ndo_set_rx_mode = mlx5e_set_rx_mode,
+ .ndo_set_mac_address = mlx5e_set_mac,
+ .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
+ .ndo_set_features = mlx5e_set_features,
+ .ndo_change_mtu = mlx5e_change_mtu,
+};
+
+static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return -ENOTSUPP;
+ if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
+ !MLX5_CAP_GEN(mdev, nic_flow_table) ||
+ !MLX5_CAP_ETH(mdev, csum_cap) ||
+ !MLX5_CAP_ETH(mdev, max_lso_cap) ||
+ !MLX5_CAP_ETH(mdev, vlan_cap) ||
+ !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
+ MLX5_CAP_FLOWTABLE(mdev,
+ flow_table_properties_nic_receive.max_ft_level)
+ < 3) {
+ mlx5_core_warn(mdev,
+ "Not creating net device, some required device capabilities are missing\n");
+ return -ENOTSUPP;
+ }
+ return 0;
+}
+
+static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ int num_comp_vectors)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ priv->params.log_sq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ priv->params.log_rq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ priv->params.rx_cq_moderation_usec =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+ priv->params.rx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ priv->params.tx_cq_moderation_usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ priv->params.tx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ priv->params.min_rx_wqes =
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
+ priv->params.rx_hash_log_tbl_sz =
+ (order_base_2(num_comp_vectors) >
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
+ order_base_2(num_comp_vectors) :
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
+ priv->params.num_tc = 1;
+ priv->params.default_vlan_prio = 0;
+
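+ /* the leading "false" keeps LRO off by default; dropping it would
+ * enable LRO only on devices that advertise lro_cap
+ */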
+ priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = num_comp_vectors;
+ priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
+ priv->queue_mapping_channel_mask =
+ roundup_pow_of_two(num_comp_vectors) - 1;
+ priv->num_tc = priv->params.num_tc;
+ priv->default_vlan_prio = priv->params.default_vlan_prio;
+
+ spin_lock_init(&priv->async_events_spinlock);
+ mutex_init(&priv->state_lock);
+
+ INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+ INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
+}
+
+static void mlx5e_build_netdev(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+
+ if (priv->num_tc > 1) {
+ mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
+ mlx5e_netdev_ops.ndo_start_xmit = mlx5e_xmit_multi_tc;
+ }
+
+ netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->watchdog_timeo = 15 * HZ;
+
+ netdev->ethtool_ops = &mlx5e_ethtool_ops;
+
+ netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_GRO;
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_RXCSUM;
+ netdev->vlan_features |= NETIF_F_RXHASH;
+
+ if (!!MLX5_CAP_ETH(mdev, lro_cap))
+ netdev->vlan_features |= NETIF_F_LRO;
+
+ netdev->hw_features = netdev->vlan_features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->features = netdev->hw_features;
+ if (!priv->params.lro_en)
+ netdev->features &= ~NETIF_F_LRO;
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ mlx5e_set_netdev_dev_addr(netdev);
+}
+
+static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
+ struct mlx5_core_mr *mr)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_create_mkey_mbox_in *in;
+ int err;
+
+ in = mlx5_vzalloc(sizeof(*in));
+ if (!in)
+ return -ENOMEM;
+
+ in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_ACCESS_MODE_PA;
+ in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+ err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
+ NULL);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int ncv = mdev->priv.eq_table.num_comp_vectors;
+ int err;
+
+ if (mlx5e_check_required_hca_cap(mdev))
+ return NULL;
+
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
+ roundup_pow_of_two(ncv) * MLX5E_MAX_NUM_TC,
+ ncv);
+ if (!netdev) {
+ mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+ return NULL;
+ }
+
+ mlx5e_build_netdev_priv(mdev, netdev, ncv);
+ mlx5e_build_netdev(netdev);
+
+ netif_carrier_off(netdev);
+
+ priv = netdev_priv(netdev);
+
+ err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
+ __func__, err);
+ goto err_free_netdev;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
+ __func__, err);
+ goto err_unmap_free_uar;
+ }
+
+ err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
+ __func__, err);
+ goto err_dealloc_pd;
+ }
+
+ err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
+ __func__, err);
+ goto err_dealloc_transport_domain;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ netdev_err(netdev, "%s: register_netdev failed, %d\n",
+ __func__, err);
+ goto err_destroy_mkey;
+ }
+
+ mlx5e_enable_async_events(priv);
+
+ return priv;
+
+err_destroy_mkey:
+ mlx5_core_destroy_mkey(mdev, &priv->mr);
+
+err_dealloc_transport_domain:
+ mlx5_dealloc_transport_domain(mdev, priv->tdn);
+
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, priv->pdn);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+
+err_free_netdev:
+ free_netdev(netdev);
+
+ return NULL;
+}
+
+static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct net_device *netdev = priv->netdev;
+
+ unregister_netdev(netdev);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+ mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
+ mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
+ mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+ mlx5e_disable_async_events(priv);
+ flush_scheduled_work();
+ free_netdev(netdev);
+}
+
+static void *mlx5e_get_netdev(void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ return priv->netdev;
+}
+
+static struct mlx5_interface mlx5e_interface = {
+ .add = mlx5e_create_netdev,
+ .remove = mlx5e_destroy_netdev,
+ .event = mlx5e_async_event,
+ .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
+ .get_dev = mlx5e_get_netdev,
+};
+
+void mlx5e_init(void)
+{
+ mlx5_register_interface(&mlx5e_interface);
+}
+
+void mlx5e_cleanup(void)
+{
+ mlx5_unregister_interface(&mlx5e_interface);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
new file mode 100644
index 000000000000..06e7c744ed4a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include "en.h"
+
+static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ /* map the whole buffer; wqe_sz already covers the HW start/end padding */
+ dma_addr = dma_map_single(rq->pdev,
+ skb->data,
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
+ goto err_free_skb;
+
+ skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
+ *((dma_addr_t *)skb->cb) = dma_addr;
+ wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+
+ rq->skb[ix] = skb;
+
+ return 0;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+
+ return -ENOMEM;
+}
+
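+/*
+ * Refill the RX ring: allocate skbs and push WQEs until the ring is full
+ * or an allocation fails; returns true while refill work remains so the
+ * NAPI poller stays scheduled.
+ */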
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+ return false;
+
+ while (!mlx5_wq_ll_is_full(wq)) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+ if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+ break;
+
+ mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+ }
+
+ /* ensure wqes are visible to device before updating doorbell record */
+ dma_wmb();
+
+ mlx5_wq_ll_update_db_record(wq);
+
+ return !mlx5_wq_ll_is_full(wq);
+}
+
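+/*
+ * LRO hands up one coalesced super-packet; rewrite its headers so the
+ * stack sees a consistent frame: IPv4 total length / IPv6 payload length,
+ * TCP PSH/ACK bits, ack_seq and window from the CQE, the minimum observed
+ * TTL, and a recomputed IPv4 header checksum (IPv6 carries none).
+ */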
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ struct tcphdr *tcp;
+
+ u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+ int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
+ (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+
+ u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ sizeof(struct iphdr));
+ ipv6 = NULL;
+ } else {
+ tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ sizeof(struct ipv6hdr));
+ ipv4 = NULL;
+ }
+
+ if (get_cqe_lro_tcppsh(cqe))
+ tcp->psh = 1;
+
+ if (tcp_ack) {
+ tcp->ack = 1;
+ tcp->ack_seq = cqe->lro_ack_seq_num;
+ tcp->window = cqe->lro_tcp_win;
+ }
+
+ if (ipv4) {
+ ipv4->ttl = cqe->lro_min_ttl;
+ ipv4->tot_len = cpu_to_be16(tot_len);
+ ipv4->check = 0;
+ ipv4->check = ip_fast_csum((unsigned char *)ipv4,
+ ipv4->ihl);
+ } else {
+ ipv6->hop_limit = cqe->lro_min_ttl;
+ ipv6->payload_len = cpu_to_be16(tot_len -
+ sizeof(struct ipv6hdr));
+ }
+}
+
+static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb)
+{
+ u8 cht = cqe->rss_hash_type;
+ int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
+ (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
+ PKT_HASH_TYPE_NONE;
+ skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
+}
+
+static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rq->netdev;
+ u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ int lro_num_seg;
+
+ skb_put(skb, cqe_bcnt);
+
+ lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+ if (lro_num_seg > 1) {
+ mlx5e_lro_update_hdr(skb, cqe);
+ skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ rq->stats.lro_packets++;
+ rq->stats.lro_bytes += cqe_bcnt;
+ }
+
+ if (likely(netdev->features & NETIF_F_RXCSUM) &&
+ (cqe->hds_ip_ext & CQE_L2_OK) &&
+ (cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ rq->stats.csum_none++;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ skb_record_rx_queue(skb, rq->ix);
+
+ if (likely(netdev->features & NETIF_F_RXHASH))
+ mlx5e_skb_set_hash(cqe, skb);
+
+ if (cqe_has_vlan(cqe))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(cqe->vlan_info));
+}
+
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+ struct mlx5e_rq *rq = cq->sqrq;
+ int i;
+
+ /* avoid accessing cq (dma coherent memory) if not needed */
+ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+ return false;
+
+ for (i = 0; i < budget; i++) {
+ struct mlx5e_rx_wqe *wqe;
+ struct mlx5_cqe64 *cqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ skb = rq->skb[wqe_counter];
+ rq->skb[wqe_counter] = NULL;
+
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ dev_kfree_skb(skb);
+ goto wq_ll_pop;
+ }
+
+ mlx5e_build_rx_skb(cqe, rq, skb);
+ rq->stats.packets++;
+ napi_gro_receive(cq->napi, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ if (i == budget) {
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
new file mode 100644
index 000000000000..bac268a670f4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include "en.h"
+
+#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
+ MLX5E_SQ_NOPS_ROOM)
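+
+/*
+ * An SQ is stopped once its free space drops below MLX5E_SQ_STOP_ROOM:
+ * room for one maximally sized WQE plus the NOPs that may be needed to
+ * pad out the ring edge (see mlx5e_sq_xmit()).
+ */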
+
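+/*
+ * Post a single NOP WQE: used to pad out the ring edge and, when
+ * notify_hw is set, to ring the doorbell so pending WQEs are flushed
+ * to the device.
+ */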
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+ memset(cseg, 0, sizeof(*cseg));
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
+
+ sq->skb[pi] = NULL;
+ sq->pc++;
+
+ if (notify_hw) {
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ mlx5e_tx_notify_hw(sq, wqe);
+ }
+}
+
+static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
+ u32 *size)
+{
+ sq->dma_fifo_pc--;
+ *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
+ *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+ dma_addr_t addr;
+ u32 size;
+ int i;
+
+ for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+ mlx5e_dma_pop_last_pushed(sq, &addr, &size);
+ dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+ }
+}
+
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
+ u32 size)
+{
+ sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
+ sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+ sq->dma_fifo_pc++;
+}
+
+static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
+ u32 *size)
+{
+ *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
+ *size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int channel_ix = fallback(dev, skb);
+ int up = skb_vlan_tag_present(skb) ?
+ skb->vlan_tci >> VLAN_PRIO_SHIFT :
+ priv->default_vlan_prio;
+ int tc = netdev_get_prio_tc_map(dev, up);
+
+ return (tc << priv->order_base_2_num_channels) | channel_ix;
+}
+
+static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
+ struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+ return MLX5E_MIN_INLINE;
+}
+
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ u8 opcode = MLX5_OPCODE_SEND;
+ dma_addr_t dma_addr = 0;
+ u16 headlen;
+ u16 ds_cnt;
+ u16 ihs;
+ int i;
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ else
+ sq->stats.csum_offload_none++;
+
+ if (skb_is_gso(skb)) {
+ u32 payload_len;
+ int num_pkts;
+
+ eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ opcode = MLX5_OPCODE_LSO;
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ payload_len = skb->len - ihs;
+ num_pkts = (payload_len / skb_shinfo(skb)->gso_size) +
+ !!(payload_len % skb_shinfo(skb)->gso_size);
+ MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
+ (num_pkts - 1) * ihs;
+ sq->stats.tso_packets++;
+ sq->stats.tso_bytes += payload_len;
+ } else {
+ ihs = mlx5e_get_inline_hdr_size(sq, skb);
+ MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
+ ETH_ZLEN);
+ }
+
+ skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+ skb_pull_inline(skb, ihs);
+
+ eseg->inline_hdr_sz = cpu_to_be16(ihs);
+
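+ /*
+ * Data segment accounting: the fixed control/eth part of the WQE
+ * plus however many 16-byte units the inline headers occupy past
+ * inline_hdr_start; dseg then points just past the inline data.
+ */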
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
+ MLX5_SEND_WQE_DS);
+ dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+
+ MLX5E_TX_SKB_CB(skb)->num_dma = 0;
+
+ headlen = skb_headlen(skb);
+ if (headlen) {
+ dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ goto dma_unmap_wqe_err;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(headlen);
+
+ mlx5e_dma_push(sq, dma_addr, headlen);
+ MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+ dseg++;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ int fsz = skb_frag_size(frag);
+
+ dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ goto dma_unmap_wqe_err;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(fsz);
+
+ mlx5e_dma_push(sq, dma_addr, fsz);
+ MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+ dseg++;
+ }
+
+ ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ sq->skb[pi] = skb;
+
+ MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
+ MLX5_SEND_WQEBB_NUM_DS);
+ sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+ netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
+
+ if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
+ netif_tx_stop_queue(sq->txq);
+ sq->stats.stopped++;
+ }
+
+ if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+ mlx5e_tx_notify_hw(sq, wqe);
+
+ /* fill sq edge with nops to avoid wqe wrap around */
+ while ((sq->pc & wq->sz_m1) > sq->edge)
+ mlx5e_send_nop(sq, false);
+
+ sq->stats.packets++;
+ return NETDEV_TX_OK;
+
+dma_unmap_wqe_err:
+ sq->stats.dropped++;
+ mlx5e_dma_unmap_wqe_err(sq, skb);
+
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ix = skb->queue_mapping;
+ int tc = 0;
+ struct mlx5e_channel *c = priv->channel[ix];
+ struct mlx5e_sq *sq = &c->sq[tc];
+
+ return mlx5e_sq_xmit(sq, skb);
+}
+
+netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ix = skb->queue_mapping & priv->queue_mapping_channel_mask;
+ int tc = skb->queue_mapping >> priv->order_base_2_num_channels;
+ struct mlx5e_channel *c = priv->channel[ix];
+ struct mlx5e_sq *sq = &c->sq[tc];
+
+ return mlx5e_sq_xmit(sq, skb);
+}
+
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_sq *sq;
+ u32 dma_fifo_cc;
+ u32 nbytes;
+ u16 npkts;
+ u16 sqcc;
+ int i;
+
+ /* avoid accessing cq (dma coherent memory) if not needed */
+ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+ return false;
+
+ sq = cq->sqrq;
+
+ npkts = 0;
+ nbytes = 0;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ /* avoid dirtying sq cache line every cqe */
+ dma_fifo_cc = sq->dma_fifo_cc;
+
+ for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+ struct mlx5_cqe64 *cqe;
+ struct sk_buff *skb;
+ u16 ci;
+ int j;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ ci = sqcc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+
+ if (unlikely(!skb)) { /* nop */
+ sq->stats.nop++;
+ sqcc++;
+ goto free_skb;
+ }
+
+ for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
+ dma_addr_t addr;
+ u32 size;
+
+ mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
+ dma_fifo_cc++;
+ dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+ }
+
+ npkts++;
+ nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
+ sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+free_skb:
+ dev_kfree_skb(skb);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->dma_fifo_cc = dma_fifo_cc;
+ sq->cc = sqcc;
+
+ netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+
+ if (netif_tx_queue_stopped(sq->txq) &&
+ mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
+ likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats.wake++;
+ }
+ if (i == MLX5E_TX_CQ_POLL_BUDGET) {
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
new file mode 100644
index 000000000000..088bc424157c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
+{
+ struct mlx5_cqwq *wq = &cq->wq;
+ u32 ci = mlx5_cqwq_get_ci(wq);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+ int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+ int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
+ if (cqe_ownership_bit != sw_ownership_val)
+ return NULL;
+
+ mlx5_cqwq_pop(wq);
+
+ /* ensure cqe content is read after cqe ownership bit */
+ rmb();
+
+ return cqe;
+}
+
+int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
+ napi);
+ bool busy = false;
+ int i;
+
+ clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+
+ for (i = 0; i < c->num_tc; i++)
+ busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);
+
+ busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);
+
+ busy |= mlx5e_post_rx_wqes(c->rq.cq.sqrq);
+
+ if (busy)
+ return budget;
+
+ napi_complete(napi);
+
+ /* avoid losing completion event during/after polling cqs */
+ if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
+ napi_schedule(napi);
+ return 0;
+ }
+
+ for (i = 0; i < c->num_tc; i++)
+ mlx5e_cq_arm(&c->sq[i].cq);
+ mlx5e_cq_arm(&c->rq.cq);
+
+ return 0;
+}
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+{
+ struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
+ barrier();
+ napi_schedule(cq->napi);
+}
+
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
+{
+ struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct net_device *netdev = priv->netdev;
+
+ netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
+ __func__, mcq->cqn, event);
+}
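
mlx5e_get_cqe() above validates a CQE by comparing its ownership bit against the low bit of the software wrap counter: hardware flips the bit it writes on every lap over the ring, so a stale entry left over from the previous lap fails the comparison without any head/tail exchange. A self-contained sketch of that protocol; the ring size, init convention, and producer are simulated here, not the driver's.

    #include <stdio.h>

    #define CQ_SZ 8

    struct cqe { int owner; int data; };

    static struct cqe ring[CQ_SZ];
    static unsigned int cc; /* consumer counter; lap parity = (cc / CQ_SZ) & 1 */

    /* simulated producer: the owner bit it writes flips on every lap */
    static void hw_post(unsigned int pc, int data)
    {
        ring[pc % CQ_SZ].owner = (pc / CQ_SZ) & 1;
        ring[pc % CQ_SZ].data = data;
    }

    static struct cqe *get_cqe(void)
    {
        struct cqe *c = &ring[cc % CQ_SZ];
        int sw_own = (cc / CQ_SZ) & 1;

        if (c->owner != sw_own)
            return NULL;  /* entry not yet written on this lap */
        cc++;             /* mlx5_cqwq_pop() analogue */
        return c;
    }

    int main(void)
    {
        unsigned int pc = 0;
        struct cqe *c;
        int i;

        for (i = 0; i < CQ_SZ; i++)
            ring[i].owner = 1; /* init: opposite parity to lap 0 = hw-owned */

        for (i = 0; i < 6; i++)   /* lap 0: six completions */
            hw_post(pc++, i);
        while ((c = get_cqe()))
            printf("got cqe %d\n", c->data);

        for (i = 6; i < 10; i++)  /* four more; two wrap into slots 0 and 1 */
            hw_post(pc++, i);
        while ((c = get_cqe()))
            printf("got cqe %d\n", c->data);
        return 0;
    }

The rmb() in the real function is the other half of the contract: the ownership bit must be read and checked before any other CQE field is trusted.
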
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 58800e4f3958..a40b96d4c662 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -339,15 +339,14 @@ static void init_eq_buf(struct mlx5_eq *eq)
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_priv *priv = &dev->priv;
struct mlx5_create_eq_mbox_in *in;
struct mlx5_create_eq_mbox_out out;
int err;
int inlen;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
- err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
- &eq->buf);
+ err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
if (err)
return err;
@@ -378,14 +377,15 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
goto err_in;
}
- snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+ snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
+
eq->eqn = out.eq_number;
eq->irqn = vecidx;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
- eq->name, eq);
+ err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+ priv->irq_info[vecidx].name, eq);
if (err)
goto err_eq;
@@ -401,7 +401,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
return 0;
err_irq:
- free_irq(table->msix_arr[vecidx].vector, eq);
+ free_irq(priv->msix_arr[vecidx].vector, eq);
err_eq:
mlx5_cmd_destroy_eq(dev, eq->eqn);
@@ -417,16 +417,15 @@ EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
mlx5_debug_eq_remove(dev, eq);
- free_irq(table->msix_arr[eq->irqn].vector, eq);
+ free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- synchronize_irq(table->msix_arr[eq->irqn].vector);
+ synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
mlx5_buf_free(dev, &eq->buf);
return err;
@@ -456,7 +455,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
- if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+ if (MLX5_CAP_GEN(dev, pg))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
@@ -479,7 +478,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
err = mlx5_create_map_eq(dev, &table->pages_eq,
MLX5_EQ_VEC_PAGES,
- dev->caps.gen.max_vf + 1,
+ /* TODO: sriov max_vf + */ 1,
1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
&dev->priv.uuari.uars[0]);
if (err) {
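
mlx5_create_map_eq() above sizes the event queue by rounding the requested entry count, plus spare entries, up to a power of two, so the consumer index can be masked with a plain AND instead of a modulo. A tiny sketch of that sizing rule; the spare count of 0x80 is an assumption for illustration, not quoted from the driver headers.

    #include <stdio.h>

    /* round up to the next power of two (v >= 1), like the kernel helper */
    static unsigned int roundup_pow_of_two(unsigned int v)
    {
        unsigned int p = 1;

        while (p < v)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned int spare = 0x80;  /* illustrative MLX5_NUM_SPARE_EQE */
        unsigned int nent[] = { 64, 512, 1000 };
        int i;

        for (i = 0; i < 3; i++) {
            unsigned int sz = roundup_pow_of_two(nent[i] + spare);

            printf("nent=%u -> eq size %u, index mask 0x%x\n",
                   nent[i], sz, sz - 1);
        }
        return 0;
    }
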
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
new file mode 100644
index 000000000000..ca90b9bc3b95
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/flow_table.h>
+#include "mlx5_core.h"
+
+struct mlx5_ftg {
+ struct mlx5_flow_table_group g;
+ u32 id;
+ u32 start_ix;
+};
+
+struct mlx5_flow_table {
+ struct mlx5_core_dev *dev;
+ u8 level;
+ u8 type;
+ u32 id;
+ struct mutex mutex; /* sync bitmap alloc */
+ u16 num_groups;
+ struct mlx5_ftg *group;
+ unsigned long *bitmap;
+ u32 size;
+};
+
+static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
+ u32 flow_index, void *flow_context)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+ u32 *in;
+ void *in_flow_context;
+ int fcdls =
+ MLX5_GET(flow_context, flow_context, destination_list_size) *
+ MLX5_ST_SZ_BYTES(dest_format_struct);
+ int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(set_fte_in, in, table_type, ft->type);
+ MLX5_SET(set_fte_in, in, table_id, ft->id);
+ MLX5_SET(set_fte_in, in, flow_index, flow_index);
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ memcpy(in_flow_context, flow_context,
+ MLX5_ST_SZ_BYTES(flow_context) + fcdls);
+
+ MLX5_SET(flow_context, in_flow_context, group_id,
+ ft->group[group_ix].id);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
+ MLX5_SET_DFTEI(in, table_type, ft->type);
+ MLX5_SET_DFTEI(in, table_id, ft->id);
+ MLX5_SET_DFTEI(in, flow_index, flow_index);
+ MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
+ MLX5_SET_DFGI(in, table_type, ft->type);
+ MLX5_SET_DFGI(in, table_id, ft->id);
+ MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET_DFGI(in, group_id, ft->group[i].id);
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
+ u32 *in;
+ void *in_match_criteria;
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ u32 start_ix = ft->group[i].start_ix;
+ u32 end_ix = start_ix + (1 << g->log_sz) - 1;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+ in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
+ match_criteria);
+
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
+ MLX5_SET_CFGI(in, table_type, ft->type);
+ MLX5_SET_CFGI(in, table_id, ft->id);
+ MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET_CFGI(in, start_flow_index, start_ix);
+ MLX5_SET_CFGI(in, end_flow_index, end_ix);
+ MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
+
+ memcpy(in_match_criteria, g->match_criteria,
+ MLX5_ST_SZ_BYTES(fte_match_param));
+
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ if (!err)
+ ft->group[i].id = MLX5_GET(create_flow_group_out, out,
+ group_id);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++)
+ mlx5_destroy_flow_group_cmd(ft, i);
+}
+
+static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ err = mlx5_create_flow_group_cmd(ft, i);
+ if (err)
+ goto err_destroy_flow_table_groups;
+ }
+
+ return 0;
+
+err_destroy_flow_table_groups:
+ for (i--; i >= 0; i--)
+ mlx5_destroy_flow_group_cmd(ft, i);
+
+ return err;
+}
+
+static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(create_flow_table_in, in, level, ft->level);
+ MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size));
+
+ MLX5_SET(create_flow_table_in, in, opcode,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ ft->id = MLX5_GET(create_flow_table_out, out, table_id);
+
+ return 0;
+}
+
+static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
+ MLX5_SET_DFTI(in, table_type, ft->type);
+ MLX5_SET_DFTI(in, table_id, ft->id);
+ MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
+ u32 *match_criteria, int *group_ix)
+{
+ void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers);
+ void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ misc_parameters);
+ void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ inner_headers);
+ int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc);
+ int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ outer_headers);
+ void *gmc_misc = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ misc_parameters);
+ void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ inner_headers);
+
+ if (g->match_criteria_enable != match_criteria_enable)
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
+ if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
+ if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
+ if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
+ continue;
+
+ *group_ix = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
+{
+ struct mlx5_ftg *g = &ft->group[group_ix];
+ int err = 0;
+
+ mutex_lock(&ft->mutex);
+
+ *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
+ if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
+ err = -ENOSPC;
+ else
+ __set_bit(*ix, ft->bitmap);
+
+ mutex_unlock(&ft->mutex);
+
+ return err;
+}
+
+static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
+{
+ __clear_bit(ix, ft->bitmap);
+}
+
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+ void *match_criteria, void *flow_context,
+ u32 *flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+ int group_ix;
+ int err;
+
+ err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
+ &group_ix);
+ if (err) {
+ mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
+ return err;
+ }
+
+ err = alloc_flow_index(ft, group_ix, flow_index);
+ if (err) {
+ mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
+ return err;
+ }
+
+ return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
+}
+EXPORT_SYMBOL(mlx5_add_flow_table_entry);
+
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ mlx5_del_flow_entry_cmd(ft, flow_index);
+ mlx5_free_flow_index(ft, flow_index);
+}
+EXPORT_SYMBOL(mlx5_del_flow_table_entry);
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+ u16 num_groups,
+ struct mlx5_flow_table_group *group)
+{
+ struct mlx5_flow_table *ft;
+ u32 start_ix = 0;
+ u32 ft_size = 0;
+ void *gr;
+ void *bm;
+ int err;
+ int i;
+
+ for (i = 0; i < num_groups; i++)
+ ft_size += (1 << group[i].log_sz);
+
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
+ bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
+ if (!ft || !gr || !bm)
+ goto err_free_ft;
+
+ ft->group = gr;
+ ft->bitmap = bm;
+ ft->num_groups = num_groups;
+ ft->level = level;
+ ft->type = table_type;
+ ft->size = ft_size;
+ ft->dev = dev;
+ mutex_init(&ft->mutex);
+
+ for (i = 0; i < ft->num_groups; i++) {
+ memcpy(&ft->group[i].g, &group[i], sizeof(*group));
+ ft->group[i].start_ix = start_ix;
+ start_ix += 1 << group[i].log_sz;
+ }
+
+ err = mlx5_create_flow_table_cmd(ft);
+ if (err)
+ goto err_free_ft;
+
+ err = mlx5_create_flow_table_groups(ft);
+ if (err)
+ goto err_destroy_flow_table_cmd;
+
+ return ft;
+
+err_destroy_flow_table_cmd:
+ mlx5_destroy_flow_table_cmd(ft);
+
+err_free_ft:
+ mlx5_core_warn(dev, "failed to alloc flow table\n");
+ kfree(bm);
+ kfree(gr);
+ kfree(ft);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mlx5_create_flow_table);
+
+void mlx5_destroy_flow_table(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ mlx5_destroy_flow_table_groups(ft);
+ mlx5_destroy_flow_table_cmd(ft);
+ kfree(ft->bitmap);
+ kfree(ft->group);
+ kfree(ft);
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_table);
+
+u32 mlx5_get_flow_table_id(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ return ft->id;
+}
+EXPORT_SYMBOL(mlx5_get_flow_table_id);
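
alloc_flow_index() above hands out flow-table slots from one bitmap shared by all groups: each group owns the contiguous range [start_ix, start_ix + 2^log_sz), and find_next_zero_bit starting at start_ix plus an upper-bound check keeps every allocation inside the owning group. A userspace sketch of the same scheme with two groups; the byte-per-slot bitmap stands in for the kernel bit helpers.

    #include <stdio.h>

    #define FT_SIZE 16

    static unsigned char bitmap[FT_SIZE]; /* one byte per slot, for clarity */

    struct group { unsigned int start_ix; unsigned int log_sz; };

    static int alloc_flow_index(const struct group *g, unsigned int *ix)
    {
        unsigned int end = g->start_ix + (1u << g->log_sz);
        unsigned int i;

        for (i = g->start_ix; i < end; i++) {
            if (!bitmap[i]) {
                bitmap[i] = 1;
                *ix = i;
                return 0;
            }
        }
        return -1; /* -ENOSPC analogue: this group's range is exhausted */
    }

    int main(void)
    {
        /* two groups: slots 0..3 and 4..11, mirroring the consecutive
         * start_ix assignment done in mlx5_create_flow_table()
         */
        struct group g0 = { 0, 2 }, g1 = { 4, 3 };
        unsigned int ix;
        int i;

        for (i = 0; i < 5; i++) {
            if (alloc_flow_index(&g0, &ix))
                printf("g0 alloc -> ENOSPC\n");
            else
                printf("g0 alloc -> ix=%u\n", ix);
        }
        alloc_flow_index(&g1, &ix);
        printf("g1 first index = %u\n", ix);
        return 0;
    }

The fifth g0 allocation fails even though g1 still has free slots, which is exactly the behavior the per-group bound check in the driver enforces.
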
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 4b4cda3bcc5f..9335e5ae18cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,79 +35,133 @@
#include <linux/module.h>
#include "mlx5_core.h"
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev)
+static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
+ int outlen)
{
- struct mlx5_cmd_query_adapter_mbox_out *out;
- struct mlx5_cmd_query_adapter_mbox_in in;
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_query_board_id(struct mlx5_core_dev *dev)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
int err;
- out = kzalloc(sizeof(*out), GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- memset(&in, 0, sizeof(in));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+ err = mlx5_cmd_query_adapter(dev, out, outlen);
if (err)
- goto out_out;
-
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
- goto out_out;
- }
+ goto out;
- memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid));
+ memcpy(dev->board_id,
+ MLX5_ADDR_OF(query_adapter_out, out,
+ query_adapter_struct.vsd_contd_psid),
+ MLX5_FLD_SZ_BYTES(query_adapter_out,
+ query_adapter_struct.vsd_contd_psid));
-out_out:
+out:
kfree(out);
-
return err;
}
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
- return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_cmd_query_adapter(mdev, out, outlen);
+ if (err)
+ goto out;
+
+ *vendor_id = MLX5_GET(query_adapter_out, out,
+ query_adapter_struct.ieee_vendor_id);
+out:
+ kfree(out);
+ return err;
}
+EXPORT_SYMBOL(mlx5_core_query_vendor_id);
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
- u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
- int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *out;
int err;
- if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
- return -ENOTSUPP;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
- memset(in, 0, sizeof(in));
- out = kzalloc(out_sz, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
if (err)
- goto out;
+ return err;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err) {
- mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
- goto out;
+ if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
}
- memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
- sizeof(*caps));
+ if (MLX5_CAP_GEN(dev, pg)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
- mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
- be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
- be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
- be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+ if (MLX5_CAP_GEN(dev, atomic)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
-out:
- kfree(out);
- return err;
+ if (MLX5_CAP_GEN(dev, roce)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+ return 0;
}
-EXPORT_SYMBOL(mlx5_query_odp_caps);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
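
mlx5_query_hca_caps() above replaces the single parsed-capabilities structure with a loop over capability types, fetching both the current and the maximum values for each type the device advertises; the QUERY_HCA_CAP op_mod that selects what to fetch is packed from the type and the cur/max selector, as seen in the reworked mlx5_core_get_caps() in the main.c hunk below. A small sketch of that encoding; the enum values here are an illustrative subset chosen for the example, not verbatim driver constants.

    #include <stdio.h>

    /* illustrative subset; values assumed for the example */
    enum cap_type { CAP_GENERAL = 0, CAP_ETHERNET_OFFLOADS = 1, CAP_ODP = 2 };
    enum cap_mode { GET_MAX = 0, GET_CUR = 1 };

    /* op_mod = (cap_type << 1) | (cap_mode & 1), as in mlx5_core_get_caps() */
    static unsigned short to_opmod(enum cap_type t, enum cap_mode m)
    {
        return (unsigned short)((t << 1) | (m & 1));
    }

    int main(void)
    {
        printf("GENERAL/MAX -> op_mod 0x%x\n", to_opmod(CAP_GENERAL, GET_MAX));
        printf("GENERAL/CUR -> op_mod 0x%x\n", to_opmod(CAP_GENERAL, GET_CUR));
        printf("ODP/CUR     -> op_mod 0x%x\n", to_opmod(CAP_ODP, GET_CUR));
        return 0;
    }

Caching both planes per type is what lets later code compare "what the device could do" against "what is currently enabled" without reissuing firmware commands.
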
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 28425e5ea91f..afad529838de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -38,6 +38,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#include <linux/interrupt.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
@@ -47,10 +48,6 @@
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
-#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0"
-#define DRIVER_RELDATE "January 2015"
-
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
@@ -208,24 +205,28 @@ static void release_bar(struct pci_dev *pdev)
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- int num_eqs = 1 << dev->caps.gen.log_max_eq;
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_eq_table *table = &priv->eq_table;
+ int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
int i;
- nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
+ nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+ MLX5_EQ_VEC_COMP_BASE;
nvec = min_t(int, nvec, num_eqs);
if (nvec <= MLX5_EQ_VEC_COMP_BASE)
return -ENOMEM;
- table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
- if (!table->msix_arr)
- return -ENOMEM;
+ priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
+
+ priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
+ if (!priv->msix_arr || !priv->irq_info)
+ goto err_free_msix;
for (i = 0; i < nvec; i++)
- table->msix_arr[i].entry = i;
+ priv->msix_arr[i].entry = i;
- nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+ nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
MLX5_EQ_VEC_COMP_BASE + 1, nvec);
if (nvec < 0)
return nvec;
@@ -233,14 +234,20 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
+
+err_free_msix:
+ kfree(priv->irq_info);
+ kfree(priv->msix_arr);
+ return -ENOMEM;
}
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_priv *priv = &dev->priv;
pci_disable_msix(dev->pdev);
- kfree(table->msix_arr);
+ kfree(priv->irq_info);
+ kfree(priv->msix_arr);
}
struct mlx5_reg_host_endianess {
@@ -277,98 +284,20 @@ static u16 to_fw_pkey_sz(u32 size)
}
}
-/* selectively copy writable fields clearing any reserved area
- */
-static void copy_rw_fields(void *to, struct mlx5_caps *from)
-{
- __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
- u64 v64;
-
- MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
- MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
- MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
- MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
- v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
- *flags_off = cpu_to_be64(v64);
-}
-
-static u16 get_pkey_table_size(int pkey)
-{
- if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
- return 0;
-
- return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
-}
-
-static void fw2drv_caps(struct mlx5_caps *caps, void *out)
-{
- struct mlx5_general_caps *gen = &caps->gen;
-
- gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
- gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
- gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
- gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
- gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
- gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
- gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
- gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
- gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
- gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
- gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
- gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
- gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
- gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
- gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
- gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
- gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
- gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
- gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
- gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
- gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
- gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
- gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
- gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
- gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
- pr_debug("flags = 0x%llx\n", gen->flags);
- gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
- gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
- gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
- gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
- gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
- gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
- gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
- gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
- gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
- gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
- gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
-}
-
-static const char *caps_opmod_str(u16 opmod)
-{
- switch (opmod) {
- case HCA_CAP_OPMOD_GET_MAX:
- return "GET_MAX";
- case HCA_CAP_OPMOD_GET_CUR:
- return "GET_CUR";
- default:
- return "Invalid";
- }
-}
-
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
- u16 opmod)
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode)
{
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *out;
+ void *out, *hca_caps;
+ u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
int err;
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
+
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
@@ -377,12 +306,30 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
err = mlx5_cmd_status_to_err_v2(out);
if (err) {
- mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
+ mlx5_core_warn(dev,
+ "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
+ cap_type, cap_mode, err);
goto query_ex;
}
- mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
- fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+ switch (cap_mode) {
+ case HCA_CAP_OPMOD_GET_MAX:
+ memcpy(dev->hca_caps_max[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ case HCA_CAP_OPMOD_GET_CUR:
+ memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ default:
+ mlx5_core_warn(dev,
+ "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
+ cap_type, cap_mode);
+ err = -EINVAL;
+ break;
+ }
query_ex:
kfree(out);
return err;
@@ -409,49 +356,45 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
{
void *set_ctx = NULL;
struct mlx5_profile *prof = dev->profile;
- struct mlx5_caps *cur_caps = NULL;
- struct mlx5_caps *max_caps = NULL;
int err = -ENOMEM;
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *set_hca_cap;
set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
goto query_ex;
- max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL);
- if (!max_caps)
- goto query_ex;
-
- cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL);
- if (!cur_caps)
- goto query_ex;
-
- err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
if (err)
goto query_ex;
- err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
if (err)
goto query_ex;
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+ capability);
+ memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+ MLX5_ST_SZ_BYTES(cmd_hca_cap));
+
+ mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
+ mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
+ 128);
/* we limit the size of the pkey table to 128 entries for now */
- cur_caps->gen.pkey_table_size = 128;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
+ to_fw_pkey_sz(128));
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
- cur_caps->gen.log_max_qp = prof->log_max_qp;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+ prof->log_max_qp);
- /* disable checksum */
- cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+ /* disable cmdif checksum */
+ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
- copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
- cur_caps);
err = set_caps(dev, set_ctx, set_sz);
query_ex:
- kfree(cur_caps);
- kfree(max_caps);
kfree(set_ctx);
-
return err;
}
@@ -507,6 +450,74 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
return 0;
}
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct msix_entry *msix = priv->msix_arr;
+ int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+ int numa_node = dev_to_node(&mdev->pdev->dev);
+ int err;
+
+ if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+ mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ priv->irq_info[i].mask);
+
+ err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
+ if (err) {
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x",
+ irq);
+ goto err_clear_mask;
+ }
+
+ return 0;
+
+err_clear_mask:
+ free_cpumask_var(priv->irq_info[i].mask);
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct msix_entry *msix = priv->msix_arr;
+ int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+
+ irq_set_affinity_hint(irq, NULL);
+ free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+ err = mlx5_irq_set_affinity_hint(mdev, i);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ for (i--; i >= 0; i--)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
@@ -549,7 +560,7 @@ static void free_comp_eqs(struct mlx5_core_dev *dev)
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
- char name[MLX5_MAX_EQ_NAME];
+ char name[MLX5_MAX_IRQ_NAME];
struct mlx5_eq *eq;
int ncomp_vec;
int nent;
@@ -566,7 +577,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
goto clean;
}
- snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(dev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
name, &dev->priv.uuari.uars[0]);
@@ -588,6 +599,61 @@ clean:
return err;
}
+#ifdef CONFIG_MLX5_CORE_EN
+static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
+{
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
+ u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
+ int err;
+ u32 sup_issi;
+
+ memset(query_in, 0, sizeof(query_in));
+ memset(query_out, 0, sizeof(query_out));
+
+ MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
+
+ err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
+ query_out, sizeof(query_out));
+ if (err) {
+ if (((struct mlx5_outbox_hdr *)query_out)->status ==
+ MLX5_CMD_STAT_BAD_OP_ERR) {
+ pr_debug("Only ISSI 0 is supported\n");
+ return 0;
+ }
+
+ pr_err("failed to query ISSI\n");
+ return err;
+ }
+
+ sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
+
+ if (sup_issi & (1 << 1)) {
+ memset(set_in, 0, sizeof(set_in));
+ memset(set_out, 0, sizeof(set_out));
+
+ MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
+ MLX5_SET(set_issi_in, set_in, current_issi, 1);
+
+ err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
+ set_out, sizeof(set_out));
+ if (err) {
+ pr_err("failed to set ISSI=1\n");
+ return err;
+ }
+
+ dev->issi = 1;
+
+ return 0;
+ } else if (sup_issi & (1 << 0) || !sup_issi) {
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+#endif
+
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -650,6 +716,14 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_pagealloc_cleanup;
}
+#ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_core_set_issi(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to set issi\n");
+ goto err_disable_hca;
+ }
+#endif
+
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
dev_err(&pdev->dev, "failed to allocate boot pages\n");
@@ -688,15 +762,15 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
mlx5_start_health_poll(dev);
- err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
+ err = mlx5_query_hca_caps(dev);
if (err) {
dev_err(&pdev->dev, "query hca failed\n");
goto err_stop_poll;
}
- err = mlx5_cmd_query_adapter(dev);
+ err = mlx5_query_board_id(dev);
if (err) {
- dev_err(&pdev->dev, "query adapter failed\n");
+ dev_err(&pdev->dev, "query board id failed\n");
goto err_stop_poll;
}
@@ -730,6 +804,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_eqs;
}
+ err = mlx5_irq_set_affinity_hints(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+ goto err_free_comp_eqs;
+ }
+
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
mlx5_init_cq_table(dev);
@@ -739,6 +819,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
return 0;
+err_free_comp_eqs:
+ free_comp_eqs(dev);
+
err_stop_eqs:
mlx5_stop_eqs(dev);
@@ -793,6 +876,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
+ mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
@@ -1048,6 +1132,10 @@ static int __init init(void)
if (err)
goto err_health;
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5e_init();
+#endif
+
return 0;
err_health:
@@ -1060,6 +1148,9 @@ err_debug:
static void __exit cleanup(void)
{
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5e_cleanup();
+#endif
pci_unregister_driver(&mlx5_core_driver);
mlx5_health_cleanup();
destroy_workqueue(mlx5_core_wq);
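
mlx5_core_set_issi() above is a small feature negotiation: query the bitmask of issue levels the device supports, switch to ISSI 1 when bit 1 is set, fall back to ISSI 0 when only bit 0 (or nothing at all) is reported by older firmware, and fail otherwise. The decision logic, lifted into a standalone sketch:

    #include <stdio.h>

    /* returns the negotiated ISSI, or -1 when no common level exists */
    static int negotiate_issi(unsigned int sup_issi)
    {
        if (sup_issi & (1u << 1))
            return 1;       /* device supports ISSI 1: prefer it */
        if ((sup_issi & (1u << 0)) || !sup_issi)
            return 0;       /* legacy firmware reports bit 0 or nothing */
        return -1;          /* -ENOTSUPP analogue */
    }

    int main(void)
    {
        unsigned int cases[] = { 0x0, 0x1, 0x2, 0x3, 0x4 };
        int i;

        for (i = 0; i < 5; i++)
            printf("sup_issi=0x%x -> issi %d\n",
                   cases[i], negotiate_issi(cases[i]));
        return 0;
    }

The BAD_OP_ERR early return in the real function handles the oldest case of all: firmware that predates the QUERY_ISSI command entirely, which is treated the same as "only ISSI 0".
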
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index d79fd85d1dd5..d5a0c2d61a18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -91,7 +91,7 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
memcpy(in.gid, mgid, sizeof(*mgid));
in.qpn = cpu_to_be32(qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a051b906afdf..fc88ecaecb4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,6 +37,10 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#define DRIVER_NAME "mlx5_core"
+#define DRIVER_VERSION "3.0-1"
+#define DRIVER_RELDATE "January 2015"
+
extern int mlx5_core_debug_mask;
#define mlx5_core_dbg(dev, format, ...) \
@@ -65,11 +69,20 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
+static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
+ int in_size, u32 *out,
+ int out_size)
+{
+ int err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+
+ if (err)
+ return err;
+
+ return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
+}
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
- struct mlx5_caps *caps);
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
+int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5e_init(void);
+void mlx5e_cleanup(void);
+
#endif /* __MLX5_CORE_H__ */
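
The mlx5_cmd_exec_check_status() wrapper added above folds the two failure layers of a firmware command into one errno: the transport-level return of mlx5_cmd_exec() and the status byte the firmware writes into the output mailbox. A stubbed sketch of that pattern; the stub status value and the status-to-errno translation are illustrative, not the driver's real table.

    #include <stdio.h>
    #include <errno.h>

    struct outbox_hdr { unsigned char status; };

    /* stub "transport": pretend the mailbox exchange itself succeeded */
    static int cmd_exec(void *in, int inlen, struct outbox_hdr *out, int outlen)
    {
        (void)in; (void)inlen; (void)outlen;
        out->status = 0x2; /* pretend firmware reported a bad parameter */
        return 0;
    }

    /* illustrative status -> errno mapping */
    static int status_to_err(struct outbox_hdr *hdr)
    {
        return hdr->status ? -EINVAL : 0;
    }

    static int cmd_exec_check_status(void *in, int inlen,
                                     struct outbox_hdr *out, int outlen)
    {
        int err = cmd_exec(in, inlen, out, outlen);

        if (err)
            return err;         /* transport failed; outbox not valid */
        return status_to_err(out); /* transport ok; check fw status byte */
    }

    int main(void)
    {
        struct outbox_hdr out;

        printf("err = %d\n",
               cmd_exec_check_status(NULL, 0, &out, sizeof(out)));
        return 0;
    }

Checking the transport return first matters: on a failed exchange the output mailbox may be all zeros, and a zero status byte would otherwise be mistaken for success.
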
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 49e90f2612d8..70147999f657 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -102,3 +102,229 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
+
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask, u8 local_port)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(ptys_reg, in, local_port, local_port);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+ ptys_size, MLX5_REG_PTYS, 0, 0);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ else
+ *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
+
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ else
+ *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
+
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ u8 *link_width_oper, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
+ if (err)
+ return err;
+
+ *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
+
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ else
+ *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
+
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+ if (proto_mask == MLX5_PTYS_EN)
+ MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
+ else
+ MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(paos_reg, in, admin_status, status);
+ MLX5_SET(paos_reg, in, ase, 1);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 1);
+}
+
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 0);
+ if (err)
+ return err;
+
+ *status = MLX5_GET(paos_reg, out, oper_status);
+ return err;
+}
+
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+ int *max_mtu, int *oper_mtu, u8 port)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, local_port, port);
+
+ mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 0);
+
+ if (max_mtu)
+ *max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
+ if (oper_mtu)
+ *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
+ if (admin_mtu)
+ *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+}
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
+ MLX5_SET(pmtu_reg, in, local_port, port);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+ u8 port)
+{
+ mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+ u8 port)
+{
+ mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+
+static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
+ int pvlc_size, u8 local_port)
+{
+ u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pvlc_reg, in, local_port, local_port);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+ pvlc_size, MLX5_REG_PVLC, 0, 0);
+
+ return err;
+}
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port)
+{
+ u32 out[MLX5_ST_SZ_DW(pvlc_reg)];
+ int err;
+
+ err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port);
+ if (err)
+ return err;
+
+ *vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
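
mlx5_query_port_mtu() above reads the PMTU register once and lets callers pass NULL for any of the three MTU fields they do not need, which is how the max-MTU and oper-MTU wrappers share a single access path. The idiom in isolation, with a stub in place of mlx5_core_access_reg() and made-up register values:

    #include <stdio.h>

    /* stub register read; the real code goes through mlx5_core_access_reg() */
    static void read_pmtu_reg(int *admin, int *max, int *oper)
    {
        int reg_admin = 1500, reg_max = 9216, reg_oper = 1500; /* fake */

        if (admin)
            *admin = reg_admin;
        if (max)
            *max = reg_max;
        if (oper)
            *oper = reg_oper;
    }

    static void query_max_mtu(int *max_mtu)
    {
        read_pmtu_reg(NULL, max_mtu, NULL);
    }

    static void query_oper_mtu(int *oper_mtu)
    {
        read_pmtu_reg(NULL, NULL, oper_mtu);
    }

    int main(void)
    {
        int max, oper;

        query_max_mtu(&max);
        query_oper_mtu(&oper);
        printf("max_mtu=%d oper_mtu=%d\n", max, oper);
        return 0;
    }
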
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index dc7dbf7e9d98..8b494b562263 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -187,10 +187,17 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_destroy_qp_mbox_in din;
struct mlx5_destroy_qp_mbox_out dout;
int err;
+ void *qpc;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
+ if (dev->issi) {
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(qpc, qpc, user_index, 0xffffff);
+ }
+
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "ret %d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index f9d25dcd03c1..c48f504ccbeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
+#include "transobj.h"
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
@@ -62,6 +63,74 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
complete(&srq->free);
}
+static int get_pas_size(void *srqc)
+{
+ u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
+ u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
+ u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
+ u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
+ u32 po_quanta = 1 << (log_page_size - 6);
+ u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
+ u32 page_size = 1 << log_page_size;
+ u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
+ u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
+
+ return rq_num_pas * sizeof(u64);
+}
+
+static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
+{
+ void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ if (srqc_to_rmpc) {
+ switch (MLX5_GET(srqc, srqc, state)) {
+ case MLX5_SRQC_STATE_GOOD:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ break;
+ case MLX5_SRQC_STATE_ERROR:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
+ break;
+ default:
+ pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
+ __LINE__, MLX5_GET(srqc, srqc, state));
+ MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
+ }
+
+ MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
+ MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
+ MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
+ MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
+ MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
+ MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
+ MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
+ MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr));
+ } else {
+ switch (MLX5_GET(rmpc, rmpc, state)) {
+ case MLX5_RMPC_STATE_RDY:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
+ break;
+ case MLX5_RMPC_STATE_ERR:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
+ break;
+ default:
+ pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
+ __func__, __LINE__,
+ MLX5_GET(rmpc, rmpc, state));
+ MLX5_SET(srqc, srqc, state,
+ MLX5_GET(rmpc, rmpc, state));
+ }
+
+ MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
+ MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
+ MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
+ MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
+ MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
+ MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
+ MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
+ MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr));
+ }
+}
+
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
@@ -79,26 +148,311 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
}
EXPORT_SYMBOL(mlx5_core_get_srq);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen)
+static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen)
{
struct mlx5_create_srq_mbox_out out;
- struct mlx5_srq_table *table = &dev->priv.srq_table;
- struct mlx5_destroy_srq_mbox_in din;
- struct mlx5_destroy_srq_mbox_out dout;
int err;
memset(&out, 0, sizeof(out));
+
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
- err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
- if (err)
- return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
+ err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
+ sizeof(out));
srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
+ return err;
+}
+
+static int destroy_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ struct mlx5_destroy_srq_mbox_in in;
+ struct mlx5_destroy_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+ (u32 *)(&out), sizeof(out));
+}
+
+static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+{
+ struct mlx5_arm_srq_mbox_in in;
+ struct mlx5_arm_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
+ in.hdr.opmod = cpu_to_be16(!!is_srq);
+ in.srqn = cpu_to_be32(srq->srqn);
+ in.lwm = cpu_to_be16(lwm);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
+ sizeof(in), (u32 *)(&out),
+ sizeof(out));
+}
+
+static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ struct mlx5_query_srq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+ (u32 *)out, sizeof(*out));
+}
+
+static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in,
+ int srq_inlen)
+{
+ u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ void *create_in;
+ void *srqc;
+ void *xrc_srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
+ xrc_srq_context_entry);
+ pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
+
+ memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
+ memcpy(pas, in->pas, pas_size);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff);
+ MLX5_SET(create_xrc_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(create_out, 0, sizeof(create_out));
+ err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
+ sizeof(create_out));
+ if (err)
+ goto out;
+
+ srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+out:
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+ memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq, u16 lwm)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+ u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+ memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
+
+ return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ u32 *xrcsrq_out;
+ void *srqc;
+ void *xrc_srqc;
+ int err;
+
+ xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!xrcsrq_out)
+ return -ENOMEM;
+ memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+
+ MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
+ MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (err)
+ goto out;
+
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
+ xrc_srq_context_entry);
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+
+out:
+ kvfree(xrcsrq_out);
+ return err;
+}
+
+static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int srq_inlen)
+{
+ void *create_in;
+ void *rmpc;
+ void *srqc;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+
+ memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
+ rmpc_srqc_reformat(srqc, rmpc, true);
+
+ err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
+
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ return mlx5_core_destroy_rmp(dev, srq->srqn);
+}
+
+static int arm_rmp_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ u16 lwm)
+{
+ void *in;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int err;
+
+ in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ if (!in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+ err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+ kvfree(in);
+ return err;
+}
+
+static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 *rmp_out;
+ void *rmpc;
+ void *srqc;
+ int err;
+
+ rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+ if (!rmp_out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
+ if (err)
+ goto out;
+
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
+ rmpc_srqc_reformat(srqc, rmpc, false);
+
+out:
+ kvfree(rmp_out);
+ return err;
+}
+
+static int create_srq_split(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in,
+ int inlen, int is_xrc)
+{
+ if (!dev->issi)
+ return create_srq_cmd(dev, srq, in, inlen);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return create_xrc_srq_cmd(dev, srq, in, inlen);
+ else
+ return create_rmp_cmd(dev, srq, in, inlen);
+}
+
+static int destroy_srq_split(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ if (!dev->issi)
+ return destroy_srq_cmd(dev, srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return destroy_xrc_srq_cmd(dev, srq);
+ else
+ return destroy_rmp_cmd(dev, srq);
+}
+
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc)
+{
+ struct mlx5_srq_table *table = &dev->priv.srq_table;
+ int err;
+
+ srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
+
+ err = create_srq_split(dev, srq, in, inlen, is_xrc);
+ if (err)
+ return err;
+
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
@@ -107,25 +461,20 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
spin_unlock_irq(&table->lock);
if (err) {
mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
- goto err_cmd;
+ goto err_destroy_srq_split;
}
return 0;
-err_cmd:
- memset(&din, 0, sizeof(din));
- memset(&dout, 0, sizeof(dout));
- din.srqn = cpu_to_be32(srq->srqn);
- din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+err_destroy_srq_split:
+ destroy_srq_split(dev, srq);
+
return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
- struct mlx5_destroy_srq_mbox_in in;
- struct mlx5_destroy_srq_mbox_out out;
struct mlx5_srq_table *table = &dev->priv.srq_table;
struct mlx5_core_srq *tmp;
int err;
@@ -142,17 +491,10 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
return -EINVAL;
}
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ err = destroy_srq_split(dev, srq);
if (err)
return err;
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
if (atomic_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
@@ -164,48 +506,24 @@ EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
{
- struct mlx5_query_srq_mbox_in in;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(out, 0, sizeof(*out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
- if (err)
- return err;
-
- if (out->hdr.status)
- return mlx5_cmd_status_to_err(&out->hdr);
-
- return err;
+ if (!dev->issi)
+ return query_srq_cmd(dev, srq, out);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return query_xrc_srq_cmd(dev, srq, out);
+ else
+ return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- struct mlx5_arm_srq_mbox_in in;
- struct mlx5_arm_srq_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
-
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
- in.hdr.opmod = cpu_to_be16(!!is_srq);
- in.srqn = cpu_to_be32(srq->srqn);
- in.lwm = cpu_to_be16(lwm);
-
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- return mlx5_cmd_status_to_err(&out.hdr);
-
- return err;
+ if (!dev->issi)
+ return arm_srq_cmd(dev, srq, lwm, is_srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return arm_xrc_srq_cmd(dev, srq, lwm);
+ else
+ return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);
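
The three exported entry points above now share a single dispatch rule: pre-ISSI
devices (!dev->issi) keep the legacy SRQ mailbox commands, while ISSI devices
split by object type into XRC SRQ or RMP commands. A minimal caller sketch,
assuming `dev`, `in`, `inlen` and `lwm` have been prepared the same way as
elsewhere in this file (error handling abbreviated):

    struct mlx5_core_srq srq;
    int err;

    /* Tags srq.common.res as MLX5_RES_XSRQ, then create_srq_split()
     * picks create_srq_cmd(), create_xrc_srq_cmd() or create_rmp_cmd(). */
    err = mlx5_core_create_srq(dev, &srq, in, inlen, 1 /* is_xrc */);
    if (err)
        return err;

    /* The same split applies transparently to arm and destroy. */
    err = mlx5_core_arm_srq(dev, &srq, lwm, 1);
    if (err)
        mlx5_core_destroy_srq(dev, &srq);
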
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
new file mode 100644
index 000000000000..8d98b03026d5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "transobj.h"
+
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+
+ return err;
+}
+
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+ int err;
+
+ MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqn = MLX5_GET(create_rq_out, out, rqn);
+
+ return err;
+}
+
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
+
+ MLX5_SET(modify_rq_in, in, rqn, rqn);
+ MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+ int err;
+
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *sqn = MLX5_GET(create_sq_out, out, sqn);
+
+ return err;
+}
+
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+
+ MLX5_SET(modify_sq_in, in, sqn, sqn);
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, in, sqn, sqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+ int err;
+
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tirn = MLX5_GET(create_tir_out, out, tirn);
+
+ return err;
+}
+
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+ int err;
+
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tisn = MLX5_GET(create_tis_out, out, tisn);
+
+ return err;
+}
+
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+ int err;
+
+ MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rmpn = MLX5_GET(create_rmp_out, out, rmpn);
+
+ return err;
+}
+
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+
+ MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+ int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
+ MLX5_SET(query_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
+{
+ void *in;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int err;
+
+ in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ if (!in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, rmpn);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+ err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *xsrqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ int err;
+
+ MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+
+ return err;
+}
+
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ void *srqc;
+ void *xrc_srqc;
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!err) {
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
+ xrc_srq_context_entry);
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+ }
+
+ return err;
+}
+
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
+ MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, in, op_mod,
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
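
Every wrapper in this file follows the same mailbox idiom: size the in/out
buffers with MLX5_ST_SZ_DW(), stamp the opcode with MLX5_SET(), execute via
mlx5_cmd_exec_check_status(), and pull the new object number out with
MLX5_GET(). A usage sketch for the TIS pair, assuming `dev` is an initialized
mlx5_core_dev (the context field value is illustrative only):

    u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
    void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
    u32 tisn;
    int err;

    MLX5_SET(tisc, tisc, prio, 0);	/* caller fills the TIS context */
    err = mlx5_core_create_tis(dev, in, sizeof(in), &tisn);
    if (err)
        return err;
    /* ... attach SQs to tisn ... */
    mlx5_core_destroy_tis(dev, tisn);
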
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
new file mode 100644
index 000000000000..f9ef244710d5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqn);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *sqn);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn);
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn);
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn);
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *xsrqn);
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn);
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out);
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm);
+
+#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 5a89bb1d678a..9ef85873ceea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -175,12 +175,13 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
for (i = 0; i < tot_uuars; i++) {
bf = &uuari->bfs[i];
- bf->buf_size = dev->caps.gen.bf_reg_size / 2;
+ bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
bf->reg = NULL; /* Add WC support */
- bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size +
- MLX5_BF_OFFSET;
+ bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
+ (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
+ MLX5_BF_OFFSET;
bf->need_lock = need_uuar_lock(i);
spin_lock_init(&bf->lock);
spin_lock_init(&bf->lock32);
@@ -223,3 +224,40 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
return 0;
}
+
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ phys_addr_t pfn;
+ phys_addr_t uar_bar_start;
+ int err;
+
+ err = mlx5_cmd_alloc_uar(mdev, &uar->index);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+ return err;
+ }
+
+ uar_bar_start = pci_resource_start(mdev->pdev, 0);
+ pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+ uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map) {
+ err = -ENOMEM;
+ mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
+ goto err_free_uar;
+ }
+
+ return 0;
+
+err_free_uar:
+ mlx5_cmd_free_uar(mdev, uar->index);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_map_uar);
+
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ iounmap(uar->map);
+ mlx5_cmd_free_uar(mdev, uar->index);
+}
+EXPORT_SYMBOL(mlx5_unmap_free_uar);
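
The two new exports bundle the common probe-path sequence (allocate a UAR
index, then ioremap its page in BAR 0) with a matching teardown. A sketch of
the intended pairing, assuming `mdev` is a live mlx5_core_dev:

    struct mlx5_uar uar;
    int err;

    err = mlx5_alloc_map_uar(mdev, &uar);
    if (err)
        return err;
    /* ... ring doorbells through uar.map, using index uar.index ... */
    mlx5_unmap_free_uar(mdev, &uar);
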
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
new file mode 100644
index 000000000000..b94177ebcf3a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
+{
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(query_vport_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_STATE);
+ MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
+
+ return MLX5_GET(query_vport_state_out, out, state);
+}
+EXPORT_SYMBOL(mlx5_query_vport_state);
+
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
+{
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u8 *out_addr;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return;
+
+ out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+ nic_vport_context.permanent_address);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+
+ mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+
+ ether_addr_copy(addr, &out_addr[2]);
+
+ kvfree(out);
+}
+EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address);
+
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 gid_index,
+ union ib_gid *gid)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ union ib_gid *tmp;
+ int tbsz;
+ int nout;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+ tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
+ mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
+ vf_num, gid_index, tbsz);
+
+ if (gid_index > tbsz && gid_index != 0xffff)
+ return -EINVAL;
+
+ if (gid_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * sizeof(*gid);
+
+ in = kzalloc(in_sz, GFP_KERNEL);
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_hca_vport_gid_in, in, opcode,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
+ MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+ MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+ gid->global.subnet_prefix = tmp->global.subnet_prefix;
+ gid->global.interface_id = tmp->global.interface_id;
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
+
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 pkey_index,
+ u16 *pkey)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ void *pkarr;
+ int nout;
+ int tbsz;
+ int err;
+ int i;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
+ if (pkey_index > tbsz && pkey_index != 0xffff)
+ return -EINVAL;
+
+ if (pkey_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
+
+ in = kzalloc(in_sz, GFP_KERNEL);
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_hca_vport_pkey_in, in, opcode,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
+ MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+ MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
+ for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
+ *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
+
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ u16 vf_num,
+ struct mlx5_hca_vport_context *rep)
+{
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+ int is_group_manager;
+ void *out;
+ void *ctx;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
+
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
+ MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
+ } else {
+ err = -EPERM;
+ goto ex;
+ }
+ }
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto ex;
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto ex;
+
+ ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
+ rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
+ rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
+ rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
+ rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
+ rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
+ rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
+ port_physical_state);
+ rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
+ rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
+ port_physical_state);
+ rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
+ rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
+ rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
+ rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
+ cap_mask1_field_select);
+ rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
+ rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
+ cap_mask2_field_select);
+ rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
+ rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
+ init_type_reply);
+ rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
+ rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
+ subnet_timeout);
+ rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
+ rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
+ rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+ qkey_violation_counter);
+ rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+ pkey_violation_counter);
+ rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
+ rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
+ system_image_guid);
+
+ex:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
+
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid)
+{
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+ if (!err)
+ *sys_image_guid = rep->sys_image_guid;
+
+ kfree(rep);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
+
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+ u64 *node_guid)
+{
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+ if (!err)
+ *node_guid = rep->node_guid;
+
+ kfree(rep);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
new file mode 100644
index 000000000000..8388411582cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "wq.h"
+#include "mlx5_core.h"
+
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+ return wq->sz_m1 + 1;
+}
+
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+{
+ return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+{
+ return mlx5_cqwq_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
+{
+ return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+}
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+ wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+ wq->sz_m1 = (1 << wq->log_sz) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ struct mlx5_wqe_srq_next_seg *next_seg;
+ int err;
+ int i;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ for (i = 0; i < wq->sz_m1; i++) {
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ next_seg->next_wqe_index = cpu_to_be16(i + 1);
+ }
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ wq->tail_next = &next_seg->next_wqe_index;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+{
+ mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+ mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
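
All three creators read the stride and log-size back out of the caller-built
context (wqc or cqc), so the allocated work queue geometry always matches what
the firmware will be told. A sketch for the cyclic variant, assuming `param`
and `wqc` were prepared for a CREATE_SQ/CREATE_RQ mailbox:

    struct mlx5_wq_ctrl wq_ctrl;
    struct mlx5_wq_cyc wq;
    int err;

    err = mlx5_wq_cyc_create(mdev, &param, wqc, &wq, &wq_ctrl);
    if (err)
        return err;
    /* ... post WQEs via mlx5_wq_cyc_get_wqe(&wq, ix) ... */
    mlx5_wq_destroy(&wq_ctrl);
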
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
new file mode 100644
index 000000000000..e0ddd69fb429
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_WQ_H__
+#define __MLX5_WQ_H__
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+struct mlx5_wq_param {
+ int linear;
+ int numa;
+};
+
+struct mlx5_wq_ctrl {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_buf buf;
+ struct mlx5_db db;
+};
+
+struct mlx5_wq_cyc {
+ void *buf;
+ __be32 *db;
+ u16 sz_m1;
+ u8 log_stride;
+};
+
+struct mlx5_cqwq {
+ void *buf;
+ __be32 *db;
+ u32 sz_m1;
+ u32 cc; /* consumer counter */
+ u8 log_sz;
+ u8 log_stride;
+};
+
+struct mlx5_wq_ll {
+ void *buf;
+ __be32 *db;
+ __be16 *tail_next;
+ u16 sz_m1;
+ u16 head;
+ u16 wqe_ctr;
+ u16 cur_sz;
+ u8 log_stride;
+};
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+
+static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+ return ctr & wq->sz_m1;
+}
+
+static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
+{
+ int equal = (cc1 == cc2);
+ int smaller = 0x8000 & (cc1 - cc2);
+
+ return !equal && !smaller;
+}
+
+static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
+{
+ return wq->cc & wq->sz_m1;
+}
+
+static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
+{
+ return wq->cc >> wq->log_sz;
+}
+
+static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
+{
+ wq->cc++;
+}
+
+static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
+{
+ *wq->db = cpu_to_be32(wq->cc & 0xffffff);
+}
+
+static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
+{
+ return wq->cur_sz == wq->sz_m1;
+}
+
+static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
+{
+ return !wq->cur_sz;
+}
+
+static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
+{
+ wq->head = head_next;
+ wq->wqe_ctr++;
+ wq->cur_sz++;
+}
+
+static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
+ __be16 *next_tail_next)
+{
+ *wq->tail_next = ix;
+ wq->tail_next = next_tail_next;
+ wq->cur_sz--;
+}
+
+static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
+{
+ *wq->db = cpu_to_be32(wq->wqe_ctr);
+}
+
+#endif /* __MLX5_WQ_H__ */
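
The cqwq accessors above encode the usual mlx5 CQ convention: a free-running
consumer counter (cc) supplies both the ring index (low bits) and the
ownership wrap count (high bits). A hypothetical polling loop, where
cqe_is_owned() stands in for the driver's ownership-bit check and `cq` for
its CQ wrapper:

    struct mlx5_cqe64 *cqe;

    for (;;) {
        u32 ci = mlx5_cqwq_get_ci(&cq->wq);

        cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
        if (!cqe_is_owned(cqe, mlx5_cqwq_get_wrap_cnt(&cq->wq)))
            break;			/* ring is drained */
        /* ... handle the completion ... */
        mlx5_cqwq_pop(&cq->wq);
    }
    mlx5_cqwq_update_db_record(&cq->wq);	/* publish cc to HW */
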
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 6f332ebdf3b5..75dc46c5fca2 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6664,7 +6664,7 @@ static void mib_read_work(struct work_struct *work)
wake_up_interruptible(
&hw_priv->counter[i].counter);
}
- } else if (jiffies >= hw_priv->counter[i].time) {
+ } else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
/* Only read MIB counters when the port is connected. */
if (media_connected == mib->state)
hw_priv->counter[i].read = 1;
@@ -6689,7 +6689,7 @@ static void mib_monitor(unsigned long ptr)
/* This is used to verify Wake-on-LAN is working. */
if (hw_priv->pme_wait) {
- if (hw_priv->pme_wait <= jiffies) {
+ if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
hw_clr_wol_pme_status(&hw_priv->hw);
hw_priv->pme_wait = 0;
}
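
Both hunks replace raw jiffies comparisons with the wrap-safe macros from
<linux/jiffies.h>, which compare via signed subtraction and so stay correct
across a jiffies overflow. The general pattern (done() is a hypothetical
condition):

    unsigned long deadline = jiffies + msecs_to_jiffies(100);

    while (!done()) {
        /* wrap-safe, unlike "jiffies >= deadline" */
        if (time_after_eq(jiffies, deadline))
            return -ETIMEDOUT;
        cpu_relax();
    }
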
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 81d0f1c86d6d..becbb5f1f5a7 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -244,7 +244,6 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
napi_gro_receive(&priv->napi, skb);
rx++;
- ndev->last_rx = jiffies;
priv->stats.rx_packets++;
priv->stats.rx_bytes += len;
if (desc0 & RX_DESC0_MULTICAST)
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 1e0f72b65459..c28111749e1f 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5308,7 +5308,8 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
/**
* s2io_ethtool_sset - Sets different link parameters.
- * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
* @info: pointer to the structure with parameters given by ethtool to set
* link information.
* Description:
@@ -5793,7 +5794,8 @@ static void s2io_vpd_read(struct s2io_nic *nic)
/**
* s2io_ethtool_geeprom - reads the value stored in the Eeprom.
- * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
* @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
* @data_buf : user defined value to be written into Eeprom.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 9e1aaa7f36bb..5f630a24e491 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1004,8 +1004,6 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
- void **tmp_arr;
-
if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
*dtrh = channel->reserve_arr[--channel->reserve_ptr];
@@ -1020,10 +1018,7 @@ _alloc_after_swap:
* i.e. no additional lock need to be done when we free a resource */
if (channel->length - channel->free_ptr > 0) {
-
- tmp_arr = channel->reserve_arr;
- channel->reserve_arr = channel->free_arr;
- channel->free_arr = tmp_arr;
+ swap(channel->reserve_arr, channel->free_arr);
channel->reserve_ptr = channel->length;
channel->reserve_top = channel->free_ptr;
channel->free_ptr = channel->length;
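
The swap() macro from <linux/kernel.h> expands to the same three-assignment
exchange the removed tmp_arr code spelled out, but works for any two lvalues
of matching type:

    int a = 1, b = 2;

    swap(a, b);	/* now a == 2, b == 1 */
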
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f221126a5c4e..055f3763e577 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1326,9 +1326,6 @@ struct qlcnic_eswitch {
};
-/* Return codes for Error handling */
-#define QL_STATUS_INVALID_PARAM -1
-
#define MAX_BW 100 /* % of link speed */
#define MIN_BW 1 /* % of link speed */
#define MAX_VLAN_ID 4095
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 367f3976df56..2f6cc423ab1d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1031,7 +1031,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
pfn = pci_info[i].id;
if (pfn >= ahw->max_vnic_func) {
- ret = QL_STATUS_INVALID_PARAM;
+ ret = -EINVAL;
dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
__func__, pfn, ahw->max_vnic_func);
goto err_eswitch;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 59a721fba018..05c28f2c6df7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -24,8 +24,6 @@
#include <linux/hwmon-sysfs.h>
#endif
-#define QLC_STATUS_UNSUPPORTED_CMD -2
-
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
return -EOPNOTSUPP;
@@ -166,7 +164,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
u8 b_state, b_rate;
if (len != sizeof(u16))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memcpy(&beacon, buf, sizeof(u16));
err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
@@ -383,17 +381,17 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
dest_pci_func = pm_cfg[i].dest_npar;
src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
if (src_index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
if (dest_index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
s_esw_id = adapter->npars[src_index].phy_port;
d_esw_id = adapter->npars[dest_index].phy_port;
if (s_esw_id != d_esw_id)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
return 0;
@@ -414,7 +412,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
count = size / sizeof(struct qlcnic_pm_func_cfg);
rem = size % sizeof(struct qlcnic_pm_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
@@ -427,7 +425,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
action = !!pm_cfg[i].action;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
id = adapter->npars[index].phy_port;
ret = qlcnic_config_port_mirroring(adapter, id,
@@ -440,7 +438,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
pci_func = pm_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
id = adapter->npars[index].phy_port;
adapter->npars[index].enable_pm = !!pm_cfg[i].action;
adapter->npars[index].dest_npar = id;
@@ -499,11 +497,11 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -517,25 +515,25 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
if (ret != QLCNIC_NON_PRIV_FUNC) {
if (esw_cfg[i].mac_anti_spoof != 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (esw_cfg[i].mac_override != 1)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (esw_cfg[i].promisc_mode != 1)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
break;
case QLCNIC_ADD_VLAN:
if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
break;
case QLCNIC_DEL_VLAN:
if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
break;
default:
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
}
@@ -559,7 +557,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
@@ -570,7 +568,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
@@ -604,7 +602,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
pci_func = esw_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -654,7 +652,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
esw_cfg[pci_func].pci_func = pci_func;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
return size;
@@ -669,11 +667,11 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (!IS_VALID_BW(np_cfg[i].min_bw) ||
!IS_VALID_BW(np_cfg[i].max_bw))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
return 0;
}
@@ -694,7 +692,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
count = size / sizeof(struct qlcnic_npar_func_cfg);
rem = size % sizeof(struct qlcnic_npar_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
np_cfg = (struct qlcnic_npar_func_cfg *)buf;
@@ -717,7 +715,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
return ret;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
adapter->npars[index].min_bw = nic_info.min_tx_bw;
adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
@@ -784,13 +782,13 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (offset >= adapter->ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memset(&port_stats, 0, size);
ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -819,13 +817,13 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memset(&esw_stats, 0, size);
ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -853,10 +851,10 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
QLCNIC_QUERY_RX_COUNTER);
@@ -883,10 +881,10 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (offset >= adapter->ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
QLCNIC_QUERY_RX_COUNTER);
@@ -953,9 +951,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (!size)
- return QL_STATUS_INVALID_PARAM;
- if (!buf)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
count = size / sizeof(u32);
@@ -1132,9 +1128,6 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- if (!buf)
- return QL_STATUS_INVALID_PARAM;
-
ret = kstrtoul(buf, 16, &data);
switch (data) {
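
Throughout qlcnic_sysfs.c the driver-private status codes are retired in
favor of standard errnos. This matters beyond style: QL_STATUS_INVALID_PARAM
was -1, which a sysfs caller would have decoded as -EPERM. The convention
after this change:

    if (len != sizeof(u16))
        return -EINVAL;		/* malformed input */
    if (qlcnic_83xx_check(adapter))
        return -EOPNOTSUPP;	/* not supported on this chip */
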
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 25800a1dedcb..02b7115b6aaa 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3871,9 +3871,6 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
return status;
}
- end_jiffies = jiffies +
- max((unsigned long)1, usecs_to_jiffies(30));
-
/* Check if bit is set then skip the mailbox command and
* clear the bit, else we are in normal reset process.
*/
@@ -3888,6 +3885,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
+ end_jiffies = jiffies + usecs_to_jiffies(30);
do {
value = ql_read32(qdev, RST_FO);
if ((value & RST_FO_FR) == 0)
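
Moving the deadline computation next to the polling loop keeps the 30us
budget from being spent inside the mailbox path above it, and the
max((unsigned long)1, ...) clamp is no longer needed because
usecs_to_jiffies() rounds a nonzero argument up to at least one jiffy. A
sketch of the resulting shape (the loop tail here is assumed; the real body
continues past the context shown):

    end_jiffies = jiffies + usecs_to_jiffies(30);
    do {
        value = ql_read32(qdev, RST_FO);
        if ((value & RST_FO_FR) == 0)
            break;
        cpu_relax();
    } while (time_before(jiffies, end_jiffies));
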
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 6af028d5f9bc..2f87909f5186 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -839,7 +839,7 @@ static const struct of_device_id qca_spi_of_match[] = {
MODULE_DEVICE_TABLE(of, qca_spi_of_match);
static int
-qca_spi_probe(struct spi_device *spi_device)
+qca_spi_probe(struct spi_device *spi)
{
struct qcaspi *qca = NULL;
struct net_device *qcaspi_devs = NULL;
@@ -847,52 +847,52 @@ qca_spi_probe(struct spi_device *spi_device)
u16 signature;
const char *mac;
- if (!spi_device->dev.of_node) {
- dev_err(&spi_device->dev, "Missing device tree\n");
+ if (!spi->dev.of_node) {
+ dev_err(&spi->dev, "Missing device tree\n");
return -EINVAL;
}
- legacy_mode = of_property_read_bool(spi_device->dev.of_node,
+ legacy_mode = of_property_read_bool(spi->dev.of_node,
"qca,legacy-mode");
if (qcaspi_clkspeed == 0) {
- if (spi_device->max_speed_hz)
- qcaspi_clkspeed = spi_device->max_speed_hz;
+ if (spi->max_speed_hz)
+ qcaspi_clkspeed = spi->max_speed_hz;
else
qcaspi_clkspeed = QCASPI_CLK_SPEED;
}
if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
(qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
- dev_info(&spi_device->dev, "Invalid clkspeed: %d\n",
+ dev_info(&spi->dev, "Invalid clkspeed: %d\n",
qcaspi_clkspeed);
return -EINVAL;
}
if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
(qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
- dev_info(&spi_device->dev, "Invalid burst len: %d\n",
+ dev_info(&spi->dev, "Invalid burst len: %d\n",
qcaspi_burst_len);
return -EINVAL;
}
if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
(qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
- dev_info(&spi_device->dev, "Invalid pluggable: %d\n",
+ dev_info(&spi->dev, "Invalid pluggable: %d\n",
qcaspi_pluggable);
return -EINVAL;
}
- dev_info(&spi_device->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
+ dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
QCASPI_DRV_VERSION,
qcaspi_clkspeed,
qcaspi_burst_len,
qcaspi_pluggable);
- spi_device->mode = SPI_MODE_3;
- spi_device->max_speed_hz = qcaspi_clkspeed;
- if (spi_setup(spi_device) < 0) {
- dev_err(&spi_device->dev, "Unable to setup SPI device\n");
+ spi->mode = SPI_MODE_3;
+ spi->max_speed_hz = qcaspi_clkspeed;
+ if (spi_setup(spi) < 0) {
+ dev_err(&spi->dev, "Unable to setup SPI device\n");
return -EFAULT;
}
@@ -905,23 +905,23 @@ qca_spi_probe(struct spi_device *spi_device)
qca = netdev_priv(qcaspi_devs);
if (!qca) {
free_netdev(qcaspi_devs);
- dev_err(&spi_device->dev, "Fail to retrieve private structure\n");
+ dev_err(&spi->dev, "Fail to retrieve private structure\n");
return -ENOMEM;
}
qca->net_dev = qcaspi_devs;
- qca->spi_dev = spi_device;
+ qca->spi_dev = spi;
qca->legacy_mode = legacy_mode;
- spi_set_drvdata(spi_device, qcaspi_devs);
+ spi_set_drvdata(spi, qcaspi_devs);
- mac = of_get_mac_address(spi_device->dev.of_node);
+ mac = of_get_mac_address(spi->dev.of_node);
if (mac)
ether_addr_copy(qca->net_dev->dev_addr, mac);
if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
eth_hw_addr_random(qca->net_dev);
- dev_info(&spi_device->dev, "Using random MAC address: %pM\n",
+ dev_info(&spi->dev, "Using random MAC address: %pM\n",
qca->net_dev->dev_addr);
}
@@ -932,7 +932,7 @@ qca_spi_probe(struct spi_device *spi_device)
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
if (signature != QCASPI_GOOD_SIGNATURE) {
- dev_err(&spi_device->dev, "Invalid signature (0x%04X)\n",
+ dev_err(&spi->dev, "Invalid signature (0x%04X)\n",
signature);
free_netdev(qcaspi_devs);
return -EFAULT;
@@ -940,7 +940,7 @@ qca_spi_probe(struct spi_device *spi_device)
}
if (register_netdev(qcaspi_devs)) {
- dev_info(&spi_device->dev, "Unable to register net device %s\n",
+ dev_info(&spi->dev, "Unable to register net device %s\n",
qcaspi_devs->name);
free_netdev(qcaspi_devs);
return -EFAULT;
@@ -952,9 +952,9 @@ qca_spi_probe(struct spi_device *spi_device)
}
static int
-qca_spi_remove(struct spi_device *spi_device)
+qca_spi_remove(struct spi_device *spi)
{
- struct net_device *qcaspi_devs = spi_get_drvdata(spi_device);
+ struct net_device *qcaspi_devs = spi_get_drvdata(spi);
struct qcaspi *qca = netdev_priv(qcaspi_devs);
qcaspi_remove_device_debugfs(qca);
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 196e98a2d93b..8e8031a1c6c7 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -2,6 +2,21 @@
# Renesas device configuration
#
+config NET_VENDOR_RENESAS
+ bool "Renesas devices"
+ default y
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Renesas devices. If you say Y, you will be asked
+ for your specific device in the following questions.
+
+if NET_VENDOR_RENESAS
+
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on HAS_DMA
@@ -15,3 +30,19 @@ config SH_ETH
This driver supports the following CPUs:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
R8A7740, R8A777x and R8A779x.
+
+config RAVB
+ tristate "Renesas Ethernet AVB support"
+ depends on HAS_DMA
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select CRC32
+ select MII
+ select MDIO_BITBANG
+ select PHYLIB
+ select PTP_1588_CLOCK
+ help
+ Renesas Ethernet AVB device driver.
+ This driver supports the following SoCs:
+ - R8A779x.
+
+endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index 1c278a8e066a..a05102a7df02 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -3,3 +3,7 @@
#
obj-$(CONFIG_SH_ETH) += sh_eth.o
+
+ravb-objs := ravb_main.o ravb_ptp.o
+
+obj-$(CONFIG_RAVB) += ravb.o
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
new file mode 100644
index 000000000000..8aa50ac4e2d6
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -0,0 +1,832 @@
+/* Renesas Ethernet AVB device driver
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * Based on the SuperH Ethernet driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __RAVB_H__
+#define __RAVB_H__
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
+
+#define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */
+#define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */
+#define NC_TX_RING_SIZE 64 /* TX ring size for Network Control */
+#define NC_RX_RING_SIZE 64 /* RX ring size for Network Control */
+#define BE_TX_RING_MIN 64
+#define BE_RX_RING_MIN 64
+#define BE_TX_RING_MAX 1024
+#define BE_RX_RING_MAX 2048
+
+#define PKT_BUF_SZ 1538
+
+/* Driver's parameters */
+#define RAVB_ALIGN 128
+
+/* Hardware time stamp */
+#define RAVB_TXTSTAMP_VALID 0x00000001 /* TX timestamp valid */
+#define RAVB_TXTSTAMP_ENABLED 0x00000010 /* Enable TX timestamping */
+
+#define RAVB_RXTSTAMP_VALID 0x00000001 /* RX timestamp valid */
+#define RAVB_RXTSTAMP_TYPE 0x00000006 /* RX type mask */
+#define RAVB_RXTSTAMP_TYPE_V2_L2_EVENT 0x00000002
+#define RAVB_RXTSTAMP_TYPE_ALL 0x00000006
+#define RAVB_RXTSTAMP_ENABLED 0x00000010 /* Enable RX timestamping */
+
+enum ravb_reg {
+ /* AVB-DMAC registers */
+ CCC = 0x0000,
+ DBAT = 0x0004,
+ DLR = 0x0008,
+ CSR = 0x000C,
+ CDAR0 = 0x0010,
+ CDAR1 = 0x0014,
+ CDAR2 = 0x0018,
+ CDAR3 = 0x001C,
+ CDAR4 = 0x0020,
+ CDAR5 = 0x0024,
+ CDAR6 = 0x0028,
+ CDAR7 = 0x002C,
+ CDAR8 = 0x0030,
+ CDAR9 = 0x0034,
+ CDAR10 = 0x0038,
+ CDAR11 = 0x003C,
+ CDAR12 = 0x0040,
+ CDAR13 = 0x0044,
+ CDAR14 = 0x0048,
+ CDAR15 = 0x004C,
+ CDAR16 = 0x0050,
+ CDAR17 = 0x0054,
+ CDAR18 = 0x0058,
+ CDAR19 = 0x005C,
+ CDAR20 = 0x0060,
+ CDAR21 = 0x0064,
+ ESR = 0x0088,
+ RCR = 0x0090,
+ RQC0 = 0x0094,
+ RQC1 = 0x0098,
+ RQC2 = 0x009C,
+ RQC3 = 0x00A0,
+ RQC4 = 0x00A4,
+ RPC = 0x00B0,
+ UFCW = 0x00BC,
+ UFCS = 0x00C0,
+ UFCV0 = 0x00C4,
+ UFCV1 = 0x00C8,
+ UFCV2 = 0x00CC,
+ UFCV3 = 0x00D0,
+ UFCV4 = 0x00D4,
+ UFCD0 = 0x00E0,
+ UFCD1 = 0x00E4,
+ UFCD2 = 0x00E8,
+ UFCD3 = 0x00EC,
+ UFCD4 = 0x00F0,
+ SFO = 0x00FC,
+ SFP0 = 0x0100,
+ SFP1 = 0x0104,
+ SFP2 = 0x0108,
+ SFP3 = 0x010C,
+ SFP4 = 0x0110,
+ SFP5 = 0x0114,
+ SFP6 = 0x0118,
+ SFP7 = 0x011C,
+ SFP8 = 0x0120,
+ SFP9 = 0x0124,
+ SFP10 = 0x0128,
+ SFP11 = 0x012C,
+ SFP12 = 0x0130,
+ SFP13 = 0x0134,
+ SFP14 = 0x0138,
+ SFP15 = 0x013C,
+ SFP16 = 0x0140,
+ SFP17 = 0x0144,
+ SFP18 = 0x0148,
+ SFP19 = 0x014C,
+ SFP20 = 0x0150,
+ SFP21 = 0x0154,
+ SFP22 = 0x0158,
+ SFP23 = 0x015C,
+ SFP24 = 0x0160,
+ SFP25 = 0x0164,
+ SFP26 = 0x0168,
+ SFP27 = 0x016C,
+ SFP28 = 0x0170,
+ SFP29 = 0x0174,
+ SFP30 = 0x0178,
+ SFP31 = 0x017C,
+ SFM0 = 0x01C0,
+ SFM1 = 0x01C4,
+ TGC = 0x0300,
+ TCCR = 0x0304,
+ TSR = 0x0308,
+ TFA0 = 0x0310,
+ TFA1 = 0x0314,
+ TFA2 = 0x0318,
+ CIVR0 = 0x0320,
+ CIVR1 = 0x0324,
+ CDVR0 = 0x0328,
+ CDVR1 = 0x032C,
+ CUL0 = 0x0330,
+ CUL1 = 0x0334,
+ CLL0 = 0x0338,
+ CLL1 = 0x033C,
+ DIC = 0x0350,
+ DIS = 0x0354,
+ EIC = 0x0358,
+ EIS = 0x035C,
+ RIC0 = 0x0360,
+ RIS0 = 0x0364,
+ RIC1 = 0x0368,
+ RIS1 = 0x036C,
+ RIC2 = 0x0370,
+ RIS2 = 0x0374,
+ TIC = 0x0378,
+ TIS = 0x037C,
+ ISS = 0x0380,
+ GCCR = 0x0390,
+ GMTT = 0x0394,
+ GPTC = 0x0398,
+ GTI = 0x039C,
+ GTO0 = 0x03A0,
+ GTO1 = 0x03A4,
+ GTO2 = 0x03A8,
+ GIC = 0x03AC,
+ GIS = 0x03B0,
+ GCPT = 0x03B4, /* Undocumented? */
+ GCT0 = 0x03B8,
+ GCT1 = 0x03BC,
+ GCT2 = 0x03C0,
+
+ /* E-MAC registers */
+ ECMR = 0x0500,
+ RFLR = 0x0508,
+ ECSR = 0x0510,
+ ECSIPR = 0x0518,
+ PIR = 0x0520,
+ PSR = 0x0528,
+ PIPR = 0x052c,
+ MPR = 0x0558,
+ PFTCR = 0x055c,
+ PFRCR = 0x0560,
+ GECMR = 0x05b0,
+ MAHR = 0x05c0,
+ MALR = 0x05c8,
+ TROCR = 0x0700, /* Undocumented? */
+ CDCR = 0x0708, /* Undocumented? */
+ LCCR = 0x0710, /* Undocumented? */
+ CEFCR = 0x0740,
+ FRECR = 0x0748,
+ TSFRCR = 0x0750,
+ TLFRCR = 0x0758,
+ RFCR = 0x0760,
+ CERCR = 0x0768, /* Undocumented? */
+ CEECR = 0x0770, /* Undocumented? */
+ MAFCR = 0x0778,
+};
+
+/* Register bits of the Ethernet AVB */
+/* CCC */
+enum CCC_BIT {
+ CCC_OPC = 0x00000003,
+ CCC_OPC_RESET = 0x00000000,
+ CCC_OPC_CONFIG = 0x00000001,
+ CCC_OPC_OPERATION = 0x00000002,
+ CCC_DTSR = 0x00000100,
+ CCC_CSEL = 0x00030000,
+ CCC_CSEL_HPB = 0x00010000,
+ CCC_CSEL_ETH_TX = 0x00020000,
+ CCC_CSEL_GMII_REF = 0x00030000,
+ CCC_BOC = 0x00100000, /* Undocumented? */
+ CCC_LBME = 0x01000000,
+};
+
+/* CSR */
+enum CSR_BIT {
+ CSR_OPS = 0x0000000F,
+ CSR_OPS_RESET = 0x00000001,
+ CSR_OPS_CONFIG = 0x00000002,
+ CSR_OPS_OPERATION = 0x00000004,
+ CSR_OPS_STANDBY = 0x00000008, /* Undocumented? */
+ CSR_DTS = 0x00000100,
+ CSR_TPO0 = 0x00010000,
+ CSR_TPO1 = 0x00020000,
+ CSR_TPO2 = 0x00040000,
+ CSR_TPO3 = 0x00080000,
+ CSR_RPO = 0x00100000,
+};
+
+/* ESR */
+enum ESR_BIT {
+ ESR_EQN = 0x0000001F,
+ ESR_ET = 0x00000F00,
+ ESR_EIL = 0x00001000,
+};
+
+/* RCR */
+enum RCR_BIT {
+ RCR_EFFS = 0x00000001,
+ RCR_ENCF = 0x00000002,
+ RCR_ESF = 0x0000000C,
+ RCR_ETS0 = 0x00000010,
+ RCR_ETS2 = 0x00000020,
+ RCR_RFCL = 0x1FFF0000,
+};
+
+/* RQC0/1/2/3/4 */
+enum RQC_BIT {
+ RQC_RSM0 = 0x00000003,
+ RQC_UFCC0 = 0x00000030,
+ RQC_RSM1 = 0x00000300,
+ RQC_UFCC1 = 0x00003000,
+ RQC_RSM2 = 0x00030000,
+ RQC_UFCC2 = 0x00300000,
+ RQC_RSM3 = 0x03000000,
+ RQC_UFCC3 = 0x30000000,
+};
+
+/* RPC */
+enum RPC_BIT {
+ RPC_PCNT = 0x00000700,
+ RPC_DCNT = 0x00FF0000,
+};
+
+/* UFCW */
+enum UFCW_BIT {
+ UFCW_WL0 = 0x0000003F,
+ UFCW_WL1 = 0x00003F00,
+ UFCW_WL2 = 0x003F0000,
+ UFCW_WL3 = 0x3F000000,
+};
+
+/* UFCS */
+enum UFCS_BIT {
+ UFCS_SL0 = 0x0000003F,
+ UFCS_SL1 = 0x00003F00,
+ UFCS_SL2 = 0x003F0000,
+ UFCS_SL3 = 0x3F000000,
+};
+
+/* UFCV0/1/2/3/4 */
+enum UFCV_BIT {
+ UFCV_CV0 = 0x0000003F,
+ UFCV_CV1 = 0x00003F00,
+ UFCV_CV2 = 0x003F0000,
+ UFCV_CV3 = 0x3F000000,
+};
+
+/* UFCD0/1/2/3/4 */
+enum UFCD_BIT {
+ UFCD_DV0 = 0x0000003F,
+ UFCD_DV1 = 0x00003F00,
+ UFCD_DV2 = 0x003F0000,
+ UFCD_DV3 = 0x3F000000,
+};
+
+/* SFO */
+enum SFO_BIT {
+ SFO_FPB = 0x0000003F,
+};
+
+/* RTC */
+enum RTC_BIT {
+ RTC_MFL0 = 0x00000FFF,
+ RTC_MFL1 = 0x0FFF0000,
+};
+
+/* TGC */
+enum TGC_BIT {
+ TGC_TSM0 = 0x00000001,
+ TGC_TSM1 = 0x00000002,
+ TGC_TSM2 = 0x00000004,
+ TGC_TSM3 = 0x00000008,
+ TGC_TQP = 0x00000030,
+ TGC_TQP_NONAVB = 0x00000000,
+ TGC_TQP_AVBMODE1 = 0x00000010,
+ TGC_TQP_AVBMODE2 = 0x00000030,
+ TGC_TBD0 = 0x00000300,
+ TGC_TBD1 = 0x00003000,
+ TGC_TBD2 = 0x00030000,
+ TGC_TBD3 = 0x00300000,
+};
+
+/* TCCR */
+enum TCCR_BIT {
+ TCCR_TSRQ0 = 0x00000001,
+ TCCR_TSRQ1 = 0x00000002,
+ TCCR_TSRQ2 = 0x00000004,
+ TCCR_TSRQ3 = 0x00000008,
+ TCCR_TFEN = 0x00000100,
+ TCCR_TFR = 0x00000200,
+};
+
+/* TSR */
+enum TSR_BIT {
+ TSR_CCS0 = 0x00000003,
+ TSR_CCS1 = 0x0000000C,
+ TSR_TFFL = 0x00000700,
+};
+
+/* TFA2 */
+enum TFA2_BIT {
+ TFA2_TSV = 0x0000FFFF,
+ TFA2_TST = 0x03FF0000,
+};
+
+/* DIC */
+enum DIC_BIT {
+ DIC_DPE1 = 0x00000002,
+ DIC_DPE2 = 0x00000004,
+ DIC_DPE3 = 0x00000008,
+ DIC_DPE4 = 0x00000010,
+ DIC_DPE5 = 0x00000020,
+ DIC_DPE6 = 0x00000040,
+ DIC_DPE7 = 0x00000080,
+ DIC_DPE8 = 0x00000100,
+ DIC_DPE9 = 0x00000200,
+ DIC_DPE10 = 0x00000400,
+ DIC_DPE11 = 0x00000800,
+ DIC_DPE12 = 0x00001000,
+ DIC_DPE13 = 0x00002000,
+ DIC_DPE14 = 0x00004000,
+ DIC_DPE15 = 0x00008000,
+};
+
+/* DIS */
+enum DIS_BIT {
+ DIS_DPF1 = 0x00000002,
+ DIS_DPF2 = 0x00000004,
+ DIS_DPF3 = 0x00000008,
+ DIS_DPF4 = 0x00000010,
+ DIS_DPF5 = 0x00000020,
+ DIS_DPF6 = 0x00000040,
+ DIS_DPF7 = 0x00000080,
+ DIS_DPF8 = 0x00000100,
+ DIS_DPF9 = 0x00000200,
+ DIS_DPF10 = 0x00000400,
+ DIS_DPF11 = 0x00000800,
+ DIS_DPF12 = 0x00001000,
+ DIS_DPF13 = 0x00002000,
+ DIS_DPF14 = 0x00004000,
+ DIS_DPF15 = 0x00008000,
+};
+
+/* EIC */
+enum EIC_BIT {
+ EIC_MREE = 0x00000001,
+ EIC_MTEE = 0x00000002,
+ EIC_QEE = 0x00000004,
+ EIC_SEE = 0x00000008,
+ EIC_CLLE0 = 0x00000010,
+ EIC_CLLE1 = 0x00000020,
+ EIC_CULE0 = 0x00000040,
+ EIC_CULE1 = 0x00000080,
+ EIC_TFFE = 0x00000100,
+};
+
+/* EIS */
+enum EIS_BIT {
+ EIS_MREF = 0x00000001,
+ EIS_MTEF = 0x00000002,
+ EIS_QEF = 0x00000004,
+ EIS_SEF = 0x00000008,
+ EIS_CLLF0 = 0x00000010,
+ EIS_CLLF1 = 0x00000020,
+ EIS_CULF0 = 0x00000040,
+ EIS_CULF1 = 0x00000080,
+ EIS_TFFF = 0x00000100,
+ EIS_QFS = 0x00010000,
+};
+
+/* RIC0 */
+enum RIC0_BIT {
+ RIC0_FRE0 = 0x00000001,
+ RIC0_FRE1 = 0x00000002,
+ RIC0_FRE2 = 0x00000004,
+ RIC0_FRE3 = 0x00000008,
+ RIC0_FRE4 = 0x00000010,
+ RIC0_FRE5 = 0x00000020,
+ RIC0_FRE6 = 0x00000040,
+ RIC0_FRE7 = 0x00000080,
+ RIC0_FRE8 = 0x00000100,
+ RIC0_FRE9 = 0x00000200,
+ RIC0_FRE10 = 0x00000400,
+ RIC0_FRE11 = 0x00000800,
+ RIC0_FRE12 = 0x00001000,
+ RIC0_FRE13 = 0x00002000,
+ RIC0_FRE14 = 0x00004000,
+ RIC0_FRE15 = 0x00008000,
+ RIC0_FRE16 = 0x00010000,
+ RIC0_FRE17 = 0x00020000,
+};
+
+/* RIS0 */
+enum RIS0_BIT {
+ RIS0_FRF0 = 0x00000001,
+ RIS0_FRF1 = 0x00000002,
+ RIS0_FRF2 = 0x00000004,
+ RIS0_FRF3 = 0x00000008,
+ RIS0_FRF4 = 0x00000010,
+ RIS0_FRF5 = 0x00000020,
+ RIS0_FRF6 = 0x00000040,
+ RIS0_FRF7 = 0x00000080,
+ RIS0_FRF8 = 0x00000100,
+ RIS0_FRF9 = 0x00000200,
+ RIS0_FRF10 = 0x00000400,
+ RIS0_FRF11 = 0x00000800,
+ RIS0_FRF12 = 0x00001000,
+ RIS0_FRF13 = 0x00002000,
+ RIS0_FRF14 = 0x00004000,
+ RIS0_FRF15 = 0x00008000,
+ RIS0_FRF16 = 0x00010000,
+ RIS0_FRF17 = 0x00020000,
+};
+
+/* RIC1 */
+enum RIC1_BIT {
+ RIC1_RFWE = 0x80000000,
+};
+
+/* RIS1 */
+enum RIS1_BIT {
+ RIS1_RFWF = 0x80000000,
+};
+
+/* RIC2 */
+enum RIC2_BIT {
+ RIC2_QFE0 = 0x00000001,
+ RIC2_QFE1 = 0x00000002,
+ RIC2_QFE2 = 0x00000004,
+ RIC2_QFE3 = 0x00000008,
+ RIC2_QFE4 = 0x00000010,
+ RIC2_QFE5 = 0x00000020,
+ RIC2_QFE6 = 0x00000040,
+ RIC2_QFE7 = 0x00000080,
+ RIC2_QFE8 = 0x00000100,
+ RIC2_QFE9 = 0x00000200,
+ RIC2_QFE10 = 0x00000400,
+ RIC2_QFE11 = 0x00000800,
+ RIC2_QFE12 = 0x00001000,
+ RIC2_QFE13 = 0x00002000,
+ RIC2_QFE14 = 0x00004000,
+ RIC2_QFE15 = 0x00008000,
+ RIC2_QFE16 = 0x00010000,
+ RIC2_QFE17 = 0x00020000,
+ RIC2_RFFE = 0x80000000,
+};
+
+/* RIS2 */
+enum RIS2_BIT {
+ RIS2_QFF0 = 0x00000001,
+ RIS2_QFF1 = 0x00000002,
+ RIS2_QFF2 = 0x00000004,
+ RIS2_QFF3 = 0x00000008,
+ RIS2_QFF4 = 0x00000010,
+ RIS2_QFF5 = 0x00000020,
+ RIS2_QFF6 = 0x00000040,
+ RIS2_QFF7 = 0x00000080,
+ RIS2_QFF8 = 0x00000100,
+ RIS2_QFF9 = 0x00000200,
+ RIS2_QFF10 = 0x00000400,
+ RIS2_QFF11 = 0x00000800,
+ RIS2_QFF12 = 0x00001000,
+ RIS2_QFF13 = 0x00002000,
+ RIS2_QFF14 = 0x00004000,
+ RIS2_QFF15 = 0x00008000,
+ RIS2_QFF16 = 0x00010000,
+ RIS2_QFF17 = 0x00020000,
+ RIS2_RFFF = 0x80000000,
+};
+
+/* TIC */
+enum TIC_BIT {
+ TIC_FTE0 = 0x00000001, /* Undocumented? */
+ TIC_FTE1 = 0x00000002, /* Undocumented? */
+ TIC_TFUE = 0x00000100,
+ TIC_TFWE = 0x00000200,
+};
+
+/* TIS */
+enum TIS_BIT {
+ TIS_FTF0 = 0x00000001, /* Undocumented? */
+ TIS_FTF1 = 0x00000002, /* Undocumented? */
+ TIS_TFUF = 0x00000100,
+ TIS_TFWF = 0x00000200,
+};
+
+/* ISS */
+enum ISS_BIT {
+ ISS_FRS = 0x00000001, /* Undocumented? */
+ ISS_FTS = 0x00000004, /* Undocumented? */
+ ISS_ES = 0x00000040,
+ ISS_MS = 0x00000080,
+ ISS_TFUS = 0x00000100,
+ ISS_TFWS = 0x00000200,
+ ISS_RFWS = 0x00001000,
+ ISS_CGIS = 0x00002000,
+ ISS_DPS1 = 0x00020000,
+ ISS_DPS2 = 0x00040000,
+ ISS_DPS3 = 0x00080000,
+ ISS_DPS4 = 0x00100000,
+ ISS_DPS5 = 0x00200000,
+ ISS_DPS6 = 0x00400000,
+ ISS_DPS7 = 0x00800000,
+ ISS_DPS8 = 0x01000000,
+ ISS_DPS9 = 0x02000000,
+ ISS_DPS10 = 0x04000000,
+ ISS_DPS11 = 0x08000000,
+ ISS_DPS12 = 0x10000000,
+ ISS_DPS13 = 0x20000000,
+ ISS_DPS14 = 0x40000000,
+ ISS_DPS15 = 0x80000000,
+};
+
+/* GCCR */
+enum GCCR_BIT {
+ GCCR_TCR = 0x00000003,
+ GCCR_TCR_NOREQ = 0x00000000, /* No request */
+ GCCR_TCR_RESET = 0x00000001, /* gPTP/AVTP presentation timer reset */
+ GCCR_TCR_CAPTURE = 0x00000003, /* Capture value set in GCCR.TCSS */
+ GCCR_LTO = 0x00000004,
+ GCCR_LTI = 0x00000008,
+ GCCR_LPTC = 0x00000010,
+ GCCR_LMTT = 0x00000020,
+ GCCR_TCSS = 0x00000300,
+ GCCR_TCSS_GPTP = 0x00000000, /* gPTP timer value */
+ GCCR_TCSS_ADJGPTP = 0x00000100, /* Adjusted gPTP timer value */
+ GCCR_TCSS_AVTP = 0x00000200, /* AVTP presentation time value */
+};
+
+/* GTI */
+enum GTI_BIT {
+ GTI_TIV = 0x0FFFFFFF,
+};
+
+/* GIC */
+enum GIC_BIT {
+ GIC_PTCE = 0x00000001, /* Undocumented? */
+ GIC_PTME = 0x00000004,
+};
+
+/* GIS */
+enum GIS_BIT {
+ GIS_PTCF = 0x00000001, /* Undocumented? */
+ GIS_PTMF = 0x00000004,
+};
+
+/* ECMR */
+enum ECMR_BIT {
+ ECMR_PRM = 0x00000001,
+ ECMR_DM = 0x00000002,
+ ECMR_TE = 0x00000020,
+ ECMR_RE = 0x00000040,
+ ECMR_MPDE = 0x00000200,
+ ECMR_TXF = 0x00010000, /* Undocumented? */
+ ECMR_RXF = 0x00020000,
+ ECMR_PFR = 0x00040000,
+ ECMR_ZPF = 0x00080000, /* Undocumented? */
+ ECMR_RZPF = 0x00100000,
+ ECMR_DPAD = 0x00200000,
+ ECMR_RCSC = 0x00800000,
+ ECMR_TRCCM = 0x04000000,
+};
+
+/* ECSR */
+enum ECSR_BIT {
+ ECSR_ICD = 0x00000001,
+ ECSR_MPD = 0x00000002,
+ ECSR_LCHNG = 0x00000004,
+ ECSR_PHYI = 0x00000008,
+};
+
+/* ECSIPR */
+enum ECSIPR_BIT {
+ ECSIPR_ICDIP = 0x00000001,
+ ECSIPR_MPDIP = 0x00000002,
+ ECSIPR_LCHNGIP = 0x00000004, /* Undocumented? */
+};
+
+/* PIR */
+enum PIR_BIT {
+ PIR_MDC = 0x00000001,
+ PIR_MMD = 0x00000002,
+ PIR_MDO = 0x00000004,
+ PIR_MDI = 0x00000008,
+};
+
+/* PSR */
+enum PSR_BIT {
+ PSR_LMON = 0x00000001,
+};
+
+/* PIPR */
+enum PIPR_BIT {
+ PIPR_PHYIP = 0x00000001,
+};
+
+/* MPR */
+enum MPR_BIT {
+ MPR_MP = 0x0000ffff,
+};
+
+/* GECMR */
+enum GECMR_BIT {
+ GECMR_SPEED = 0x00000001,
+ GECMR_SPEED_100 = 0x00000000,
+ GECMR_SPEED_1000 = 0x00000001,
+};
+
+/* The Ethernet AVB descriptor definitions. */
+struct ravb_desc {
+ __le16 ds; /* Descriptor size */
+ u8 cc; /* Content control MSBs (reserved) */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+ __le32 dptr; /* Descriptor pointer */
+};
+
+enum DIE_DT {
+ /* Frame data */
+ DT_FMID = 0x40,
+ DT_FSTART = 0x50,
+ DT_FEND = 0x60,
+ DT_FSINGLE = 0x70,
+ /* Chain control */
+ DT_LINK = 0x80,
+ DT_LINKFIX = 0x90,
+ DT_EOS = 0xa0,
+ /* HW/SW arbitration */
+ DT_FEMPTY = 0xc0,
+ DT_FEMPTY_IS = 0xd0,
+ DT_FEMPTY_IC = 0xe0,
+ DT_FEMPTY_ND = 0xf0,
+ DT_LEMPTY = 0x20,
+ DT_EEMPTY = 0x30,
+};
+
+struct ravb_rx_desc {
+ __le16 ds_cc; /* Descriptor size and content control LSBs */
+ u8 msc; /* MAC status code */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+ __le32 dptr; /* Descriptor pointer */
+};
+
+struct ravb_ex_rx_desc {
+ __le16 ds_cc; /* Descriptor size and content control lower bits */
+ u8 msc; /* MAC status code */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+ __le32 dptr; /* Descriptor pointer */
+ __le32 ts_n; /* Timestamp nsec */
+ __le32 ts_sl; /* Timestamp seconds (lower 32 bits) */
+ __le16 ts_sh; /* Timestamp seconds (upper 16 bits) */
+ __le16 res; /* Reserved bits */
+};
+
+enum RX_DS_CC_BIT {
+ RX_DS = 0x0fff, /* Data size */
+ RX_TR = 0x1000, /* Truncation indication */
+ RX_EI = 0x2000, /* Error indication */
+ RX_PS = 0xc000, /* Padding selection */
+};
+
+/* E-MAC status code */
+enum MSC_BIT {
+ MSC_CRC = 0x01, /* Frame CRC error */
+ MSC_RFE = 0x02, /* Frame reception error (flagged by PHY) */
+ MSC_RTSF = 0x04, /* Frame length error (frame too short) */
+ MSC_RTLF = 0x08, /* Frame length error (frame too long) */
+ MSC_FRE = 0x10, /* Fraction error (not a multiple of 8 bits) */
+ MSC_CRL = 0x20, /* Carrier lost */
+ MSC_CEEF = 0x40, /* Carrier extension error */
+ MSC_MC = 0x80, /* Multicast frame reception */
+};
+
+struct ravb_tx_desc {
+ __le16 ds_tagl; /* Descriptor size and frame tag LSBs */
+ u8 tagh_tsr; /* Frame tag MSBs and timestamp storage request bit */
+ u8 die_dt; /* Descriptor interrupt enable and type */
+ __le32 dptr; /* Descriptor pointer */
+};
+
+enum TX_DS_TAGL_BIT {
+ TX_DS = 0x0fff, /* Data size */
+ TX_TAGL = 0xf000, /* Frame tag LSBs */
+};
+
+enum TX_TAGH_TSR_BIT {
+ TX_TAGH = 0x3f, /* Frame tag MSBs */
+ TX_TSR = 0x40, /* Timestamp storage request */
+};
+
+enum RAVB_QUEUE {
+ RAVB_BE = 0, /* Best Effort Queue */
+ RAVB_NC, /* Network Control Queue */
+};
+
+#define DBAT_ENTRY_NUM 22
+#define RX_QUEUE_OFFSET 4
+#define NUM_RX_QUEUE 2
+#define NUM_TX_QUEUE 2
+
+struct ravb_tstamp_skb {
+ struct list_head list;
+ struct sk_buff *skb;
+ u16 tag;
+};
+
+struct ravb_ptp_perout {
+ u32 target;
+ u32 period;
+};
+
+#define N_EXT_TS 1
+#define N_PER_OUT 1
+
+struct ravb_ptp {
+ struct ptp_clock *clock;
+ struct ptp_clock_info info;
+ u32 default_addend;
+ u32 current_addend;
+ int extts[N_EXT_TS];
+ struct ravb_ptp_perout perout[N_PER_OUT];
+};
+
+struct ravb_private {
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ void __iomem *addr;
+ struct mdiobb_ctrl mdiobb;
+ u32 num_rx_ring[NUM_RX_QUEUE];
+ u32 num_tx_ring[NUM_TX_QUEUE];
+ u32 desc_bat_size;
+ dma_addr_t desc_bat_dma;
+ struct ravb_desc *desc_bat;
+ dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
+ dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
+ struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+ struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+ struct sk_buff **rx_skb[NUM_RX_QUEUE];
+ struct sk_buff **tx_skb[NUM_TX_QUEUE];
+ void **tx_buffers[NUM_TX_QUEUE];
+ u32 rx_over_errors;
+ u32 rx_fifo_errors;
+ struct net_device_stats stats[NUM_RX_QUEUE];
+ u32 tstamp_tx_ctrl;
+ u32 tstamp_rx_ctrl;
+ struct list_head ts_skb_list;
+ u32 ts_skb_tag;
+ struct ravb_ptp ptp;
+ spinlock_t lock; /* Register access lock */
+ u32 cur_rx[NUM_RX_QUEUE]; /* Consumer ring indices */
+ u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */
+ u32 cur_tx[NUM_TX_QUEUE];
+ u32 dirty_tx[NUM_TX_QUEUE];
+ struct napi_struct napi[NUM_RX_QUEUE];
+ struct work_struct work;
+ /* MII transceiver section. */
+ struct mii_bus *mii_bus; /* MDIO bus control */
+ struct phy_device *phydev; /* PHY device control */
+ int link;
+ phy_interface_t phy_interface;
+ int msg_enable;
+ int speed;
+ int duplex;
+
+ unsigned no_avb_link:1;
+ unsigned avb_link_active_low:1;
+};
+
+static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return ioread32(priv->addr + reg);
+}
+
+static inline void ravb_write(struct net_device *ndev, u32 data,
+ enum ravb_reg reg)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ iowrite32(data, priv->addr + reg);
+}
+
+int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
+
+irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
+void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev);
+void ravb_ptp_stop(struct net_device *ndev);
+
+#endif /* #ifndef __RAVB_H__ */
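
ravb_read() and ravb_write() above are thin ioread32()/iowrite32() wrappers; the driver composes them into read-modify-write sequences when touching individual register fields. A minimal sketch of that pattern using the CCC loopback bit defined in this header (the helper name is hypothetical):

/* Set CCC.LBME without disturbing the other CCC fields: read the
 * current value, OR in the bit, write the result back.
 */
static void ravb_example_enable_loopback(struct net_device *ndev)
{
	ravb_write(ndev, ravb_read(ndev, CCC) | CCC_LBME, CCC);
}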
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
new file mode 100644
index 000000000000..fd9745714d90
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -0,0 +1,1824 @@
+/* Renesas Ethernet AVB device driver
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * Based on the SuperH Ethernet driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/cache.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "ravb.h"
+
+#define RAVB_DEF_MSG_ENABLE \
+ (NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
+{
+ int i;
+
+ for (i = 0; i < 10000; i++) {
+ if ((ravb_read(ndev, reg) & mask) == value)
+ return 0;
+ udelay(10);
+ }
+ return -ETIMEDOUT;
+}
+
+static int ravb_config(struct net_device *ndev)
+{
+ int error;
+
+ /* Set config mode */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
+ CCC);
+ /* Check if the operating mode is changed to the config mode */
+ error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
+ if (error)
+ netdev_err(ndev, "failed to switch device to config mode\n");
+
+ return error;
+}
+
+static void ravb_set_duplex(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecmr = ravb_read(ndev, ECMR);
+
+ if (priv->duplex) /* Full */
+ ecmr |= ECMR_DM;
+ else /* Half */
+ ecmr &= ~ECMR_DM;
+ ravb_write(ndev, ecmr, ECMR);
+}
+
+static void ravb_set_rate(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ switch (priv->speed) {
+ case 100: /* 100BASE */
+ ravb_write(ndev, GECMR_SPEED_100, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ ravb_write(ndev, GECMR_SPEED_1000, GECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ravb_set_buffer_align(struct sk_buff *skb)
+{
+ u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
+
+ if (reserve)
+ skb_reserve(skb, RAVB_ALIGN - reserve);
+}
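
A worked example of the arithmetic in ravb_set_buffer_align(), assuming RAVB_ALIGN = 128 as defined in ravb.h:

/* Suppose skb->data ends in 0x0a:
 *   reserve = 0x0a & (128 - 1) = 10 bytes past the last boundary,
 * so skb_reserve(skb, 128 - 10) advances skb->data by 118 bytes to
 * the next 128-byte boundary. The RAVB_ALIGN - 1 spare bytes asked
 * for at netdev_alloc_skb() time guarantee the shift always fits.
 */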
+
+/* Get MAC address from the MAC address registers
+ *
+ * Ethernet AVB device doesn't have ROM for MAC address.
+ * This function gets the MAC address that was used by a bootloader.
+ */
+static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
+{
+ if (mac) {
+ ether_addr_copy(ndev->dev_addr, mac);
+ } else {
+ ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24);
+ ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF;
+ ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF;
+ ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF;
+ ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF;
+ ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF;
+ }
+}
+
+static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
+{
+ struct ravb_private *priv = container_of(ctrl, struct ravb_private,
+ mdiobb);
+ u32 pir = ravb_read(priv->ndev, PIR);
+
+ if (set)
+ pir |= mask;
+ else
+ pir &= ~mask;
+ ravb_write(priv->ndev, pir, PIR);
+}
+
+/* MDC pin control */
+static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MDC, level);
+}
+
+/* Data I/O pin control */
+static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MMD, output);
+}
+
+/* Set data bit */
+static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ ravb_mdio_ctrl(ctrl, PIR_MDO, value);
+}
+
+/* Get data bit */
+static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
+{
+ struct ravb_private *priv = container_of(ctrl, struct ravb_private,
+ mdiobb);
+
+ return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
+}
+
+/* MDIO bus control struct */
+static struct mdiobb_ops bb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = ravb_set_mdc,
+ .set_mdio_dir = ravb_set_mdio_dir,
+ .set_mdio_data = ravb_set_mdio_data,
+ .get_mdio_data = ravb_get_mdio_data,
+};
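
These ops are consumed by the generic mdio-bitbang layer; the driver's MDIO bus setup (later in ravb_main.c, outside this excerpt) hands them over roughly as in the sketch below. alloc_mdio_bitbang() is the stock API from linux/mdio-bitbang.h; error handling and the bus id/parent fields are elided here:

	/* Create a struct mii_bus whose MDC/MDIO waggling goes through
	 * bb_ops, i.e. through the PIR register accessors above.
	 */
	priv->mdiobb.ops = &bb_ops;
	priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
	if (!priv->mii_bus)
		return -ENOMEM;
	priv->mii_bus->name = "ravb_mdio";
	/* ... then mdiobus_register(priv->mii_bus) ... */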
+
+/* Free skb's and DMA buffers for Ethernet AVB */
+static void ravb_ring_free(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int ring_size;
+ int i;
+
+ /* Free RX skb ringbuffer */
+ if (priv->rx_skb[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++)
+ dev_kfree_skb(priv->rx_skb[q][i]);
+ }
+ kfree(priv->rx_skb[q]);
+ priv->rx_skb[q] = NULL;
+
+ /* Free TX skb ringbuffer */
+ if (priv->tx_skb[q]) {
+ for (i = 0; i < priv->num_tx_ring[q]; i++)
+ dev_kfree_skb(priv->tx_skb[q][i]);
+ }
+ kfree(priv->tx_skb[q]);
+ priv->tx_skb[q] = NULL;
+
+ /* Free aligned TX buffers */
+ if (priv->tx_buffers[q]) {
+ for (i = 0; i < priv->num_tx_ring[q]; i++)
+ kfree(priv->tx_buffers[q][i]);
+ }
+ kfree(priv->tx_buffers[q]);
+ priv->tx_buffers[q] = NULL;
+
+ if (priv->rx_ring[q]) {
+ ring_size = sizeof(struct ravb_ex_rx_desc) *
+ (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
+ priv->rx_desc_dma[q]);
+ priv->rx_ring[q] = NULL;
+ }
+
+ if (priv->tx_ring[q]) {
+ ring_size = sizeof(struct ravb_tx_desc) *
+ (priv->num_tx_ring[q] + 1);
+ dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
+ priv->tx_desc_dma[q]);
+ priv->tx_ring[q] = NULL;
+ }
+}
+
+/* Format skb and descriptor buffer for Ethernet AVB */
+static void ravb_ring_format(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_ex_rx_desc *rx_desc = NULL;
+ struct ravb_tx_desc *tx_desc = NULL;
+ struct ravb_desc *desc = NULL;
+ int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+ int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ void *buffer;
+ int i;
+
+ priv->cur_rx[q] = 0;
+ priv->cur_tx[q] = 0;
+ priv->dirty_rx[q] = 0;
+ priv->dirty_tx[q] = 0;
+
+ memset(priv->rx_ring[q], 0, rx_ring_size);
+ /* Build RX ring buffer */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ priv->rx_skb[q][i] = NULL;
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+ if (!skb)
+ break;
+ ravb_set_buffer_align(skb);
+ /* RX descriptor */
+ rx_desc = &priv->rx_ring[q][i];
+ /* The size of the buffer should be a multiple of 16 bytes. */
+ rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+ priv->rx_skb[q][i] = skb;
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->die_dt = DT_FEMPTY;
+ }
+ rx_desc = &priv->rx_ring[q][i];
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
+ priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
+
+ memset(priv->tx_ring[q], 0, tx_ring_size);
+ /* Build TX ring buffer */
+ for (i = 0; i < priv->num_tx_ring[q]; i++) {
+ priv->tx_skb[q][i] = NULL;
+ priv->tx_buffers[q][i] = NULL;
+ buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+ if (!buffer)
+ break;
+ /* Aligned TX buffer */
+ priv->tx_buffers[q][i] = buffer;
+ tx_desc = &priv->tx_ring[q][i];
+ tx_desc->die_dt = DT_EEMPTY;
+ }
+ tx_desc = &priv->tx_ring[q][i];
+ tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
+ tx_desc->die_dt = DT_LINKFIX; /* type */
+
+ /* RX descriptor base address for best effort */
+ desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
+ desc->die_dt = DT_LINKFIX; /* type */
+ desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+
+ /* TX descriptor base address for best effort */
+ desc = &priv->desc_bat[q];
+ desc->die_dt = DT_LINKFIX; /* type */
+ desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
+}
+
+/* Init skb and descriptor buffer for Ethernet AVB */
+static int ravb_ring_init(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int ring_size;
+
+ /* Allocate RX and TX skb rings */
+ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
+ sizeof(*priv->rx_skb[q]), GFP_KERNEL);
+ priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
+ sizeof(*priv->tx_skb[q]), GFP_KERNEL);
+ if (!priv->rx_skb[q] || !priv->tx_skb[q])
+ goto error;
+
+ /* Allocate rings for the aligned buffers */
+ priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
+ sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
+ if (!priv->tx_buffers[q])
+ goto error;
+
+ /* Allocate all RX descriptors. */
+ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
+ priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
+ if (!priv->rx_ring[q])
+ goto error;
+
+ priv->dirty_rx[q] = 0;
+
+ /* Allocate all TX descriptors. */
+ ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+ priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+ &priv->tx_desc_dma[q],
+ GFP_KERNEL);
+ if (!priv->tx_ring[q])
+ goto error;
+
+ return 0;
+
+error:
+ ravb_ring_free(ndev, q);
+
+ return -ENOMEM;
+}
+
+/* E-MAC init function */
+static void ravb_emac_init(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecmr;
+
+ /* Receive frame limit set register */
+ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
+
+ /* PAUSE prohibition */
+ ecmr = ravb_read(ndev, ECMR);
+ ecmr &= ECMR_DM;
+ ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
+ ravb_write(ndev, ecmr, ECMR);
+
+ ravb_set_rate(ndev);
+
+ /* Set MAC address */
+ ravb_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ ravb_write(ndev,
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+
+ ravb_write(ndev, 1, MPR);
+
+ /* E-MAC status register clear */
+ ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
+
+ /* E-MAC interrupt enable register */
+ ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
+}
+
+/* Device init function for Ethernet AVB */
+static int ravb_dmac_init(struct net_device *ndev)
+{
+ int error;
+
+ /* Set CONFIG mode */
+ error = ravb_config(ndev);
+ if (error)
+ return error;
+
+ error = ravb_ring_init(ndev, RAVB_BE);
+ if (error)
+ return error;
+ error = ravb_ring_init(ndev, RAVB_NC);
+ if (error) {
+ ravb_ring_free(ndev, RAVB_BE);
+ return error;
+ }
+
+ /* Descriptor format */
+ ravb_ring_format(ndev, RAVB_BE);
+ ravb_ring_format(ndev, RAVB_NC);
+
+#if defined(__LITTLE_ENDIAN)
+ ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
+#else
+ ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
+#endif
+
+ /* Set AVB RX */
+ ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
+
+ /* Set FIFO size */
+ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
+
+ /* Timestamp enable */
+ ravb_write(ndev, TCCR_TFEN, TCCR);
+
+ /* Interrupt enable: */
+ /* Frame receive */
+ ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
+ /* Receive FIFO full warning */
+ ravb_write(ndev, RIC1_RFWE, RIC1);
+ /* Receive FIFO full error, descriptor empty */
+ ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
+ /* Frame transmitted, timestamp FIFO updated */
+ ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
+
+ /* Setting the control will start the AVB-DMAC process. */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
+ CCC);
+
+ return 0;
+}
+
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_tx_desc *desc;
+ int free_num = 0;
+ int entry = 0;
+ u32 size;
+
+ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+ entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+ desc = &priv->tx_ring[q][entry];
+ if (desc->die_dt != DT_FEMPTY)
+ break;
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+ /* Free the original skb. */
+ if (priv->tx_skb[q][entry]) {
+ dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ free_num++;
+ }
+ stats->tx_packets++;
+ stats->tx_bytes += size;
+ desc->die_dt = DT_EEMPTY;
+ }
+ return free_num;
+}
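
ravb_tx_free() relies on cur_tx/dirty_tx being free-running counters: they are reduced modulo the ring size only when used as an index, so their unsigned difference is the number of in-flight descriptors even across wrap-around. A worked example assuming num_tx_ring = 64:

	/* cur_tx = 130, dirty_tx = 68:
	 *   in flight = 130 - 68 = 62 descriptors,
	 *   next entry to reclaim = 68 % 64 = 4.
	 */
	u32 in_flight = priv->cur_tx[q] - priv->dirty_tx[q];
	int entry = priv->dirty_tx[q] % priv->num_tx_ring[q];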
+
+static void ravb_get_tx_tstamp(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb;
+ struct timespec64 ts;
+ u16 tag, tfa_tag;
+ int count;
+ u32 tfa2;
+
+ count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
+ while (count--) {
+ tfa2 = ravb_read(ndev, TFA2);
+ tfa_tag = (tfa2 & TFA2_TST) >> 16;
+ ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
+ ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
+ ravb_read(ndev, TFA1);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
+ list) {
+ skb = ts_skb->skb;
+ tag = ts_skb->tag;
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ if (tag == tfa_tag) {
+ skb_tstamp_tx(skb, &shhwtstamps);
+ break;
+ }
+ }
+ ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
+ }
+}
+
+/* Packet receive function for Ethernet AVB */
+static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
+ priv->cur_rx[q];
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_ex_rx_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ struct timespec64 ts;
+ u16 pkt_len = 0;
+ u8 desc_status;
+ int limit;
+
+ boguscnt = min(boguscnt, *quota);
+ limit = boguscnt;
+ desc = &priv->rx_ring[q][entry];
+ while (desc->die_dt != DT_FEMPTY) {
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ desc_status = desc->msc;
+ pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+
+ if (--boguscnt < 0)
+ break;
+
+ if (desc_status & MSC_MC)
+ stats->multicast++;
+
+ if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
+ MSC_CEEF)) {
+ stats->rx_errors++;
+ if (desc_status & MSC_CRC)
+ stats->rx_crc_errors++;
+ if (desc_status & MSC_RFE)
+ stats->rx_frame_errors++;
+ if (desc_status & (MSC_RTLF | MSC_RTSF))
+ stats->rx_length_errors++;
+ if (desc_status & MSC_CEEF)
+ stats->rx_missed_errors++;
+ } else {
+ u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
+
+ skb = priv->rx_skb[q][entry];
+ priv->rx_skb[q][entry] = NULL;
+ dma_sync_single_for_cpu(&ndev->dev,
+ le32_to_cpu(desc->dptr),
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
+ get_ts &= (q == RAVB_NC) ?
+ RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
+ ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+ if (get_ts) {
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
+ 32) | le32_to_cpu(desc->ts_sl);
+ ts.tv_nsec = le32_to_cpu(desc->ts_n);
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+ }
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi[q], skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ }
+
+ entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q][entry];
+ }
+
+ /* Refill the RX ring buffers. */
+ for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
+ entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->rx_ring[q][entry];
+ /* The size of the buffer should be a multiple of 16 bytes. */
+ desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+
+ if (!priv->rx_skb[q][entry]) {
+ skb = netdev_alloc_skb(ndev,
+ PKT_BUF_SZ + RAVB_ALIGN - 1);
+ if (!skb)
+ break; /* Better luck next round. */
+ ravb_set_buffer_align(skb);
+ dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+ ALIGN(PKT_BUF_SZ, 16),
+ DMA_FROM_DEVICE);
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ le16_to_cpu(desc->ds_cc),
+ DMA_FROM_DEVICE);
+ skb_checksum_none_assert(skb);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ desc->dptr = cpu_to_le32(dma_addr);
+ priv->rx_skb[q][entry] = skb;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY;
+ }
+
+ *quota -= limit - (++boguscnt);
+
+ return boguscnt <= 0;
+}
+
+static void ravb_rcv_snd_disable(struct net_device *ndev)
+{
+ /* Disable TX and RX */
+ ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
+}
+
+static void ravb_rcv_snd_enable(struct net_device *ndev)
+{
+ /* Enable TX and RX */
+ ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
+}
+
+/* Wait until the DMA processes have finished, then stop the AVB-DMAC */
+static int ravb_stop_dma(struct net_device *ndev)
+{
+ int error;
+
+ /* Wait for stopping the hardware TX process */
+ error = ravb_wait(ndev, TCCR,
+ TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
+ if (error)
+ return error;
+
+ error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
+ 0);
+ if (error)
+ return error;
+
+ /* Stop the E-MAC's RX/TX processes. */
+ ravb_rcv_snd_disable(ndev);
+
+ /* Wait for stopping the RX DMA process */
+ error = ravb_wait(ndev, CSR, CSR_RPO, 0);
+ if (error)
+ return error;
+
+ /* Stop AVB-DMAC process */
+ return ravb_config(ndev);
+}
+
+/* E-MAC interrupt handler */
+static void ravb_emac_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ecsr, psr;
+
+ ecsr = ravb_read(ndev, ECSR);
+ ravb_write(ndev, ecsr, ECSR); /* clear interrupt */
+ if (ecsr & ECSR_ICD)
+ ndev->stats.tx_carrier_errors++;
+ if (ecsr & ECSR_LCHNG) {
+ /* Link changed */
+ if (priv->no_avb_link)
+ return;
+ psr = ravb_read(ndev, PSR);
+ if (priv->avb_link_active_low)
+ psr ^= PSR_LMON;
+ if (!(psr & PSR_LMON)) {
+ /* Disable RX and TX */
+ ravb_rcv_snd_disable(ndev);
+ } else {
+ /* Enable RX and TX */
+ ravb_rcv_snd_enable(ndev);
+ }
+ }
+}
+
+/* Error interrupt handler */
+static void ravb_error_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 eis, ris2;
+
+ eis = ravb_read(ndev, EIS);
+ ravb_write(ndev, ~EIS_QFS, EIS);
+ if (eis & EIS_QFS) {
+ ris2 = ravb_read(ndev, RIS2);
+ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+
+ /* Receive Descriptor Empty interrupt (best effort queue) */
+ if (ris2 & RIS2_QFF0)
+ priv->stats[RAVB_BE].rx_over_errors++;
+
+ /* Receive Descriptor Empty interrupt (network control queue) */
+ if (ris2 & RIS2_QFF1)
+ priv->stats[RAVB_NC].rx_over_errors++;
+
+ /* Receive FIFO Overflow int */
+ if (ris2 & RIS2_RFFF)
+ priv->rx_fifo_errors++;
+ }
+}
+
+static irqreturn_t ravb_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+ irqreturn_t result = IRQ_NONE;
+ u32 iss;
+
+ spin_lock(&priv->lock);
+ /* Get interrupt status */
+ iss = ravb_read(ndev, ISS);
+
+ /* Received and transmitted interrupts */
+ if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
+ u32 ris0 = ravb_read(ndev, RIS0);
+ u32 ric0 = ravb_read(ndev, RIC0);
+ u32 tis = ravb_read(ndev, TIS);
+ u32 tic = ravb_read(ndev, TIC);
+ int q;
+
+ /* Timestamp updated */
+ if (tis & TIS_TFUF) {
+ ravb_write(ndev, ~TIS_TFUF, TIS);
+ ravb_get_tx_tstamp(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* Network control and best effort queue RX/TX */
+ for (q = RAVB_NC; q >= RAVB_BE; q--) {
+ if (((ris0 & ric0) & BIT(q)) ||
+ ((tis & tic) & BIT(q))) {
+ if (napi_schedule_prep(&priv->napi[q])) {
+ /* Mask RX and TX interrupts */
+ ravb_write(ndev, ric0 & ~BIT(q), RIC0);
+ ravb_write(ndev, tic & ~BIT(q), TIC);
+ __napi_schedule(&priv->napi[q]);
+ } else {
+ netdev_warn(ndev,
+ "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
+ ris0, ric0);
+ netdev_warn(ndev,
+ " tx status 0x%08x, tx mask 0x%08x.\n",
+ tis, tic);
+ }
+ result = IRQ_HANDLED;
+ }
+ }
+ }
+
+ /* E-MAC status summary */
+ if (iss & ISS_MS) {
+ ravb_emac_interrupt(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* Error status summary */
+ if (iss & ISS_ES) {
+ ravb_error_interrupt(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ if (iss & ISS_CGIS)
+ result = ravb_ptp_interrupt(ndev);
+
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return result;
+}
+
+static int ravb_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *ndev = napi->dev;
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int q = napi - priv->napi;
+ int mask = BIT(q);
+ int quota = budget;
+ u32 ris0, tis;
+
+ for (;;) {
+ tis = ravb_read(ndev, TIS);
+ ris0 = ravb_read(ndev, RIS0);
+ if (!((ris0 & mask) || (tis & mask)))
+ break;
+
+ /* Processing RX Descriptor Ring */
+ if (ris0 & mask) {
+ /* Clear RX interrupt */
+ ravb_write(ndev, ~mask, RIS0);
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
+ }
+ /* Processing TX Descriptor Ring */
+ if (tis & mask) {
+ spin_lock_irqsave(&priv->lock, flags);
+ /* Clear TX interrupt */
+ ravb_write(ndev, ~mask, TIS);
+ ravb_tx_free(ndev, q);
+ netif_wake_subqueue(ndev, q);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ }
+
+ napi_complete(napi);
+
+ /* Re-enable RX/TX interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
+ ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Receive error message handling */
+ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+ ndev->stats.rx_over_errors = priv->rx_over_errors;
+ netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
+ }
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+ ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
+ netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
+ }
+out:
+ return budget - quota;
+}
+
+/* PHY state control function */
+static void ravb_adjust_link(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+ bool new_state = false;
+
+ if (phydev->link) {
+ if (phydev->duplex != priv->duplex) {
+ new_state = true;
+ priv->duplex = phydev->duplex;
+ ravb_set_duplex(ndev);
+ }
+
+ if (phydev->speed != priv->speed) {
+ new_state = true;
+ priv->speed = phydev->speed;
+ ravb_set_rate(ndev);
+ }
+ if (!priv->link) {
+ ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
+ ECMR);
+ new_state = true;
+ priv->link = phydev->link;
+ if (priv->no_avb_link)
+ ravb_rcv_snd_enable(ndev);
+ }
+ } else if (priv->link) {
+ new_state = true;
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = -1;
+ if (priv->no_avb_link)
+ ravb_rcv_snd_disable(ndev);
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+}
+
+/* PHY init function */
+static int ravb_phy_init(struct net_device *ndev)
+{
+ struct device_node *np = ndev->dev.parent->of_node;
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev;
+ struct device_node *pn;
+
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = -1;
+
+ /* Try connecting to PHY */
+ pn = of_parse_phandle(np, "phy-handle", 0);
+ phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev) {
+ netdev_err(ndev, "failed to connect PHY\n");
+ return -ENOENT;
+ }
+
+ netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
+ phydev->addr, phydev->irq, phydev->drv->name);
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+/* PHY control start function */
+static int ravb_phy_start(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ error = ravb_phy_init(ndev);
+ if (error)
+ return error;
+
+ phy_start(priv->phydev);
+
+ return 0;
+}
+
+static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error = -ENODEV;
+ unsigned long flags;
+
+ if (priv->phydev) {
+ spin_lock_irqsave(&priv->lock, flags);
+ error = phy_ethtool_gset(priv->phydev, ecmd);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return error;
+}
+
+static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int error;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable TX and RX */
+ ravb_rcv_snd_disable(ndev);
+
+ error = phy_ethtool_sset(priv->phydev, ecmd);
+ if (error)
+ goto error_exit;
+
+ if (ecmd->duplex == DUPLEX_FULL)
+ priv->duplex = 1;
+ else
+ priv->duplex = 0;
+
+ ravb_set_duplex(ndev);
+
+error_exit:
+ mdelay(1);
+
+ /* Enable TX and RX */
+ ravb_rcv_snd_enable(ndev);
+
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_nway_reset(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error = -ENODEV;
+ unsigned long flags;
+
+ if (priv->phydev) {
+ spin_lock_irqsave(&priv->lock, flags);
+ error = phy_start_aneg(priv->phydev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return error;
+}
+
+static u32 ravb_get_msglevel(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+static void ravb_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ priv->msg_enable = value;
+}
+
+static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "rx_queue_0_current",
+ "tx_queue_0_current",
+ "rx_queue_0_dirty",
+ "tx_queue_0_dirty",
+ "rx_queue_0_packets",
+ "tx_queue_0_packets",
+ "rx_queue_0_bytes",
+ "tx_queue_0_bytes",
+ "rx_queue_0_mcast_packets",
+ "rx_queue_0_errors",
+ "rx_queue_0_crc_errors",
+ "rx_queue_0_frame_errors",
+ "rx_queue_0_length_errors",
+ "rx_queue_0_missed_errors",
+ "rx_queue_0_over_errors",
+
+ "rx_queue_1_current",
+ "tx_queue_1_current",
+ "rx_queue_1_dirty",
+ "tx_queue_1_dirty",
+ "rx_queue_1_packets",
+ "tx_queue_1_packets",
+ "rx_queue_1_bytes",
+ "tx_queue_1_bytes",
+ "rx_queue_1_mcast_packets",
+ "rx_queue_1_errors",
+ "rx_queue_1_crc_errors",
+ "rx_queue_1_frame_errors_",
+ "rx_queue_1_length_errors",
+ "rx_queue_1_missed_errors",
+ "rx_queue_1_over_errors",
+};
+
+#define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats)
+
+static int ravb_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return RAVB_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ravb_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int i = 0;
+ int q;
+
+ /* Device-specific stats */
+ for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
+ struct net_device_stats *stats = &priv->stats[q];
+
+ data[i++] = priv->cur_rx[q];
+ data[i++] = priv->cur_tx[q];
+ data[i++] = priv->dirty_rx[q];
+ data[i++] = priv->dirty_tx[q];
+ data[i++] = stats->rx_packets;
+ data[i++] = stats->tx_packets;
+ data[i++] = stats->rx_bytes;
+ data[i++] = stats->tx_bytes;
+ data[i++] = stats->multicast;
+ data[i++] = stats->rx_errors;
+ data[i++] = stats->rx_crc_errors;
+ data[i++] = stats->rx_frame_errors;
+ data[i++] = stats->rx_length_errors;
+ data[i++] = stats->rx_missed_errors;
+ data[i++] = stats->rx_over_errors;
+ }
+}
+
+static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
+ break;
+ }
+}
+
+static void ravb_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ring->rx_max_pending = BE_RX_RING_MAX;
+ ring->tx_max_pending = BE_TX_RING_MAX;
+ ring->rx_pending = priv->num_rx_ring[RAVB_BE];
+ ring->tx_pending = priv->num_tx_ring[RAVB_BE];
+}
+
+static int ravb_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ if (ring->tx_pending > BE_TX_RING_MAX ||
+ ring->rx_pending > BE_RX_RING_MAX ||
+ ring->tx_pending < BE_TX_RING_MIN ||
+ ring->rx_pending < BE_RX_RING_MIN)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+ /* Wait for DMA stopping */
+ error = ravb_stop_dma(ndev);
+ if (error) {
+ netdev_err(ndev,
+ "cannot set ringparam! Any AVB processes are still running?\n");
+ return error;
+ }
+ synchronize_irq(ndev->irq);
+
+ /* Free all the skb's in the RX queue and the DMA buffers. */
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+ }
+
+ /* Set new parameters */
+ priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
+ priv->num_tx_ring[RAVB_BE] = ring->tx_pending;
+
+ if (netif_running(ndev)) {
+ error = ravb_dmac_init(ndev);
+ if (error) {
+ netdev_err(ndev,
+ "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+ return error;
+ }
+
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_device_attach(ndev);
+ }
+
+ return 0;
+}
+
+static int ravb_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ info->phc_index = ptp_clock_index(priv->ptp.clock);
+
+ return 0;
+}
+
+static const struct ethtool_ops ravb_ethtool_ops = {
+ .get_settings = ravb_get_settings,
+ .set_settings = ravb_set_settings,
+ .nway_reset = ravb_nway_reset,
+ .get_msglevel = ravb_get_msglevel,
+ .set_msglevel = ravb_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ravb_get_strings,
+ .get_ethtool_stats = ravb_get_ethtool_stats,
+ .get_sset_count = ravb_get_sset_count,
+ .get_ringparam = ravb_get_ringparam,
+ .set_ringparam = ravb_set_ringparam,
+ .get_ts_info = ravb_get_ts_info,
+};
+
+/* Network device open function for Ethernet AVB */
+static int ravb_open(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int error;
+
+ napi_enable(&priv->napi[RAVB_BE]);
+ napi_enable(&priv->napi[RAVB_NC]);
+
+ error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
+ ndev);
+ if (error) {
+ netdev_err(ndev, "cannot request IRQ\n");
+ goto out_napi_off;
+ }
+
+ /* Device init */
+ error = ravb_dmac_init(ndev);
+ if (error)
+ goto out_free_irq;
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
+
+ /* PHY control start */
+ error = ravb_phy_start(ndev);
+ if (error)
+ goto out_ptp_stop;
+
+ return 0;
+
+out_ptp_stop:
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+out_free_irq:
+ free_irq(ndev->irq, ndev);
+out_napi_off:
+ napi_disable(&priv->napi[RAVB_NC]);
+ napi_disable(&priv->napi[RAVB_BE]);
+ return error;
+}
+
+/* Timeout function for Ethernet AVB */
+static void ravb_tx_timeout(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ netif_err(priv, tx_err, ndev,
+ "transmit timed out, status %08x, resetting...\n",
+ ravb_read(ndev, ISS));
+
+ /* tx_errors count up */
+ ndev->stats.tx_errors++;
+
+ schedule_work(&priv->work);
+}
+
+static void ravb_tx_timeout_work(struct work_struct *work)
+{
+ struct ravb_private *priv = container_of(work, struct ravb_private,
+ work);
+ struct net_device *ndev = priv->ndev;
+
+ netif_tx_stop_all_queues(ndev);
+
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+
+ /* Wait for DMA stopping */
+ ravb_stop_dma(ndev);
+
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+
+ /* Device init */
+ ravb_dmac_init(ndev);
+ ravb_emac_init(ndev);
+
+ /* Initialise PTP Clock driver */
+ ravb_ptp_init(ndev, priv->pdev);
+
+ netif_tx_start_all_queues(ndev);
+}
+
+/* Packet transmit function for Ethernet AVB */
+static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_tstamp_skb *ts_skb = NULL;
+ u16 q = skb_get_queue_mapping(skb);
+ struct ravb_tx_desc *desc;
+ unsigned long flags;
+ u32 dma_addr;
+ void *buffer;
+ u32 entry;
+ u32 tccr;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+ netif_err(priv, tx_queued, ndev,
+ "still transmitting with the full ring!\n");
+ netif_stop_subqueue(ndev, q);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+ /* skb_put_padto() frees the skb on failure, so pad before recording
+ * it in the ring and bail out to "exit" rather than double-freeing it
+ * on the "drop" path.
+ */
+ if (skb_put_padto(skb, ETH_ZLEN))
+ goto exit;
+
+ entry = priv->cur_tx[q] % priv->num_tx_ring[q];
+ priv->tx_skb[q][entry] = skb;
+
+ buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
+ memcpy(buffer, skb->data, skb->len);
+ desc = &priv->tx_ring[q][entry];
+ desc->ds_tagl = cpu_to_le16(skb->len);
+ dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr))
+ goto drop;
+ desc->dptr = cpu_to_le32(dma_addr);
+
+ /* TX timestamp required */
+ if (q == RAVB_NC) {
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+ DMA_TO_DEVICE);
+ goto drop;
+ }
+ ts_skb->skb = skb;
+ ts_skb->tag = priv->ts_skb_tag++;
+ priv->ts_skb_tag &= 0x3ff;
+ list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+
+ /* TAG and timestamp required flag */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_tx_timestamp(skb);
+ desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
+ desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
+ }
+
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FSINGLE;
+
+ tccr = ravb_read(ndev, TCCR);
+ if (!(tccr & (TCCR_TSRQ0 << q)))
+ ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+
+ priv->cur_tx[q]++;
+ if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
+ !ravb_tx_free(ndev, q))
+ netif_stop_subqueue(ndev, q);
+
+exit:
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return NETDEV_TX_OK;
+
+drop:
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[q][entry] = NULL;
+ goto exit;
+}
+
+static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ /* An skb that needs a TX timestamp must be sent through the network
+ * control (NC) queue; everything else uses the best-effort queue.
+ */
+ return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
+ RAVB_BE;
+}
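+
+/* For illustration only (not driver code): userspace opts a socket into
+ * hardware TX timestamping with SO_TIMESTAMPING (constants from
+ * <linux/net_tstamp.h>), which sets SKBTX_HW_TSTAMP on the socket's
+ * skbs and so steers them to the NC queue above:
+ *
+ *	int val = SOF_TIMESTAMPING_TX_HARDWARE |
+ *		  SOF_TIMESTAMPING_RAW_HARDWARE;
+ *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
+ */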
+
+static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *nstats, *stats0, *stats1;
+
+ nstats = &ndev->stats;
+ stats0 = &priv->stats[RAVB_BE];
+ stats1 = &priv->stats[RAVB_NC];
+
+ nstats->tx_dropped += ravb_read(ndev, TROCR);
+ ravb_write(ndev, 0, TROCR); /* (write clear) */
+ nstats->collisions += ravb_read(ndev, CDCR);
+ ravb_write(ndev, 0, CDCR); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
+ ravb_write(ndev, 0, LCCR); /* (write clear) */
+
+ nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
+ ravb_write(ndev, 0, CERCR); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
+ ravb_write(ndev, 0, CEECR); /* (write clear) */
+
+ nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
+ nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
+ nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
+ nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
+ nstats->multicast = stats0->multicast + stats1->multicast;
+ nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
+ nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
+ nstats->rx_frame_errors =
+ stats0->rx_frame_errors + stats1->rx_frame_errors;
+ nstats->rx_length_errors =
+ stats0->rx_length_errors + stats1->rx_length_errors;
+ nstats->rx_missed_errors =
+ stats0->rx_missed_errors + stats1->rx_missed_errors;
+ nstats->rx_over_errors =
+ stats0->rx_over_errors + stats1->rx_over_errors;
+
+ return nstats;
+}
+
+/* Update promiscuous bit */
+static void ravb_set_rx_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 ecmr;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ecmr = ravb_read(ndev, ECMR);
+ if (ndev->flags & IFF_PROMISC)
+ ecmr |= ECMR_PRM;
+ else
+ ecmr &= ~ECMR_PRM;
+ ravb_write(ndev, ecmr, ECMR);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* Device close function for Ethernet AVB */
+static int ravb_close(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+
+ netif_tx_stop_all_queues(ndev);
+
+ /* Disable interrupts by clearing the interrupt masks. */
+ ravb_write(ndev, 0, RIC0);
+ ravb_write(ndev, 0, RIC1);
+ ravb_write(ndev, 0, RIC2);
+ ravb_write(ndev, 0, TIC);
+
+ /* Stop PTP Clock driver */
+ ravb_ptp_stop(ndev);
+
+ /* Set the config mode to stop the AVB-DMAC's processes */
+ if (ravb_stop_dma(ndev) < 0)
+ netdev_err(ndev,
+ "device will be stopped after h/w processes are done.\n");
+
+ /* Clear the timestamp list */
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ }
+
+ /* PHY disconnect */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ free_irq(ndev->irq, ndev);
+
+ napi_disable(&priv->napi[RAVB_NC]);
+ napi_disable(&priv->napi[RAVB_BE]);
+
+ /* Free all the skb's in the RX queue and the DMA buffers. */
+ ravb_ring_free(ndev, RAVB_BE);
+ ravb_ring_free(ndev, RAVB_NC);
+
+ return 0;
+}
+
+static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
+ HWTSTAMP_TX_OFF;
+ if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ else
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/* Control hardware time stamping */
+static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct hwtstamp_config config;
+ u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
+ u32 tstamp_tx_ctrl;
+
+ if (copy_from_user(&config, req->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* Reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tstamp_tx_ctrl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tstamp_rx_ctrl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
+ break;
+ default:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
+ }
+
+ priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+
+ return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
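+
+/* A minimal userspace sketch (illustrative only, not part of this
+ * driver) of driving the handler above; "eth0" is an assumed
+ * placeholder and fd is any AF_INET datagram socket. SIOCSHWTSTAMP
+ * requires CAP_NET_ADMIN:
+ *
+ *	#include <string.h>
+ *	#include <sys/ioctl.h>
+ *	#include <net/if.h>
+ *	#include <linux/net_tstamp.h>
+ *	#include <linux/sockios.h>
+ *
+ *	int enable_ptp_timestamping(int fd)
+ *	{
+ *		struct hwtstamp_config cfg = {
+ *			.tx_type = HWTSTAMP_TX_ON,
+ *			.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
+ *		};
+ *		struct ifreq ifr = { 0 };
+ *
+ *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *		ifr.ifr_data = (char *)&cfg;
+ *		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
+ *	}
+ */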
+
+/* ioctl to device function */
+static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!phydev)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return ravb_hwtstamp_get(ndev, req);
+ case SIOCSHWTSTAMP:
+ return ravb_hwtstamp_set(ndev, req);
+ }
+
+ return phy_mii_ioctl(phydev, req, cmd);
+}
+
+static const struct net_device_ops ravb_netdev_ops = {
+ .ndo_open = ravb_open,
+ .ndo_stop = ravb_close,
+ .ndo_start_xmit = ravb_start_xmit,
+ .ndo_select_queue = ravb_select_queue,
+ .ndo_get_stats = ravb_get_stats,
+ .ndo_set_rx_mode = ravb_set_rx_mode,
+ .ndo_tx_timeout = ravb_tx_timeout,
+ .ndo_do_ioctl = ravb_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+/* MDIO bus init function */
+static int ravb_mdio_init(struct ravb_private *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ int error;
+
+ /* Bitbang init */
+ priv->mdiobb.ops = &bb_ops;
+
+ /* MII controller setting */
+ priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
+ if (!priv->mii_bus)
+ return -ENOMEM;
+
+ /* Hook up MII support for ethtool */
+ priv->mii_bus->name = "ravb_mii";
+ priv->mii_bus->parent = dev;
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+
+ /* Register MDIO bus */
+ error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+ if (error)
+ goto out_free_bus;
+
+ return 0;
+
+out_free_bus:
+ free_mdio_bitbang(priv->mii_bus);
+ return error;
+}
+
+/* MDIO bus release function */
+static int ravb_mdio_release(struct ravb_private *priv)
+{
+ /* Unregister mdio bus */
+ mdiobus_unregister(priv->mii_bus);
+
+ /* Free bitbang info */
+ free_mdio_bitbang(priv->mii_bus);
+
+ return 0;
+}
+
+static int ravb_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ravb_private *priv;
+ struct net_device *ndev;
+ int error, irq, q;
+ struct resource *res;
+
+ if (!np) {
+ dev_err(&pdev->dev,
+ "this driver is required to be instantiated from device tree\n");
+ return -EINVAL;
+ }
+
+ /* Get base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
+ NUM_TX_QUEUE, NUM_RX_QUEUE);
+ if (!ndev)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* The Ether-specific entries in the device structure. */
+ ndev->base_addr = res->start;
+ ndev->dma = -1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ error = -ENODEV;
+ goto out_release;
+ }
+ ndev->irq = irq;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->pdev = pdev;
+ priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
+ priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+ priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->addr)) {
+ error = PTR_ERR(priv->addr);
+ goto out_release;
+ }
+
+ spin_lock_init(&priv->lock);
+ INIT_WORK(&priv->work, ravb_tx_timeout_work);
+
+ priv->phy_interface = of_get_phy_mode(np);
+
+ priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
+ priv->avb_link_active_low =
+ of_property_read_bool(np, "renesas,ether-link-active-low");
+
+ /* Set function */
+ ndev->netdev_ops = &ravb_netdev_ops;
+ ndev->ethtool_ops = &ravb_ethtool_ops;
+
+ /* Set AVB config mode */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
+ CCC);
+
+ /* Set CSEL value */
+ ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
+ CCC);
+
+ /* Set GTI value */
+ ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI);
+
+ /* Request GTI loading */
+ ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
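+
+ /* Worked example for the value above (assuming the HPB peripheral
+ * clock selected via CCC_CSEL_HPB runs at 130 MHz): GTI.TIV holds the
+ * gPTP timer increment in 2^-20 ns fixed point, so
+ * (1000 << 20) / 130 = 8065969, i.e. roughly 7.69 ns added per clock
+ * cycle.
+ */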
+
+ /* Allocate descriptor base address table */
+ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
+ priv->desc_bat = dma_alloc_coherent(NULL, priv->desc_bat_size,
+ &priv->desc_bat_dma, GFP_KERNEL);
+ if (!priv->desc_bat) {
+ dev_err(&ndev->dev,
+ "Cannot allocate desc base address table (size %d bytes)\n",
+ priv->desc_bat_size);
+ error = -ENOMEM;
+ goto out_release;
+ }
+ for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
+ priv->desc_bat[q].die_dt = DT_EOS;
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
+
+ /* Initialise HW timestamp list */
+ INIT_LIST_HEAD(&priv->ts_skb_list);
+
+ /* Debug message level */
+ priv->msg_enable = RAVB_DEF_MSG_ENABLE;
+
+ /* Read and set MAC address */
+ ravb_read_mac_address(ndev, of_get_mac_address(np));
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ dev_warn(&pdev->dev,
+ "no valid MAC address supplied, using a random one\n");
+ eth_hw_addr_random(ndev);
+ }
+
+ /* MDIO bus init */
+ error = ravb_mdio_init(priv);
+ if (error) {
+ dev_err(&ndev->dev, "failed to initialize MDIO\n");
+ goto out_dma_free;
+ }
+
+ netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
+ netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+
+ /* Network device register */
+ error = register_netdev(ndev);
+ if (error)
+ goto out_napi_del;
+
+ /* Print device information */
+ netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+
+ platform_set_drvdata(pdev, ndev);
+
+ return 0;
+
+out_napi_del:
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
+ ravb_mdio_release(priv);
+out_dma_free:
+ dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
+out_release:
+ if (ndev)
+ free_netdev(ndev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return error;
+}
+
+static int ravb_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+ priv->desc_bat_dma);
+ /* Set reset mode */
+ ravb_write(ndev, CCC_OPC_RESET, CCC);
+ pm_runtime_put_sync(&pdev->dev);
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi[RAVB_NC]);
+ netif_napi_del(&priv->napi[RAVB_BE]);
+ ravb_mdio_release(priv);
+ pm_runtime_disable(&pdev->dev);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ravb_runtime_nop(struct device *dev)
+{
+ /* Runtime PM callback shared between ->runtime_suspend()
+ * and ->runtime_resume(). Simply returns success.
+ *
+ * This driver re-initializes all registers after
+ * pm_runtime_get_sync() anyway so there is no need
+ * to save and restore registers here.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops ravb_dev_pm_ops = {
+ .runtime_suspend = ravb_runtime_nop,
+ .runtime_resume = ravb_runtime_nop,
+};
+
+#define RAVB_PM_OPS (&ravb_dev_pm_ops)
+#else
+#define RAVB_PM_OPS NULL
+#endif
+
+static const struct of_device_id ravb_match_table[] = {
+ { .compatible = "renesas,etheravb-r8a7790" },
+ { .compatible = "renesas,etheravb-r8a7794" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ravb_match_table);
+
+static struct platform_driver ravb_driver = {
+ .probe = ravb_probe,
+ .remove = ravb_remove,
+ .driver = {
+ .name = "ravb",
+ .pm = RAVB_PM_OPS,
+ .of_match_table = ravb_match_table,
+ },
+};
+
+module_platform_driver(ravb_driver);
+
+MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
+MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
new file mode 100644
index 000000000000..42656da50500
--- /dev/null
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -0,0 +1,357 @@
+/* PTP 1588 clock using the Renesas Ethernet AVB
+ *
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "ravb.h"
+
+static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+
+ error = ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+ if (error)
+ return error;
+
+ ravb_write(ndev, ravb_read(ndev, GCCR) | request, GCCR);
+ return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_time_read(struct ravb_private *priv, struct timespec64 *ts)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+
+ error = ravb_ptp_tcr_request(priv, GCCR_TCR_CAPTURE);
+ if (error)
+ return error;
+
+ ts->tv_nsec = ravb_read(ndev, GCT0);
+ ts->tv_sec = ravb_read(ndev, GCT1) |
+ ((s64)ravb_read(ndev, GCT2) << 32);
+
+ return 0;
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_time_write(struct ravb_private *priv,
+ const struct timespec64 *ts)
+{
+ struct net_device *ndev = priv->ndev;
+ int error;
+ u32 gccr;
+
+ error = ravb_ptp_tcr_request(priv, GCCR_TCR_RESET);
+ if (error)
+ return error;
+
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LTO)
+ return -EBUSY;
+ ravb_write(ndev, ts->tv_nsec, GTO0);
+ ravb_write(ndev, ts->tv_sec, GTO1);
+ ravb_write(ndev, (ts->tv_sec >> 32) & 0xffff, GTO2);
+ ravb_write(ndev, gccr | GCCR_LTO, GCCR);
+
+ return 0;
+}
+
+/* Caller must hold the lock */
+static int ravb_ptp_update_compare(struct ravb_private *priv, u32 ns)
+{
+ struct net_device *ndev = priv->ndev;
+ /* When the comparison value (GPTC.PTCV) is in range of
+ * [x-1 to x+1] (x is the configured increment value in
+ * GTI.TIV), it may happen that a comparison match is
+ * not detected when the timer wraps around.
+ */
+ u32 gti_ns_plus_1 = (priv->ptp.current_addend >> 20) + 1;
+ u32 gccr;
+
+ if (ns < gti_ns_plus_1)
+ ns = gti_ns_plus_1;
+ else if (ns > 0 - gti_ns_plus_1)
+ ns = 0 - gti_ns_plus_1;
+
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LPTC)
+ return -EBUSY;
+ ravb_write(ndev, ns, GPTC);
+ ravb_write(ndev, gccr | GCCR_LPTC, GCCR);
+
+ return 0;
+}
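+
+/* Worked example of the clamping above: with the GTI value programmed
+ * at probe time ((1000 << 20) / 130 = 8065969), gti_ns_plus_1 is
+ * (8065969 >> 20) + 1 = 8, so a requested compare value is kept within
+ * [8, 0xfffffff8] ("0 - 8" in u32 arithmetic), one increment clear of
+ * either side of the 32-bit wrap point.
+ */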
+
+/* PTP clock operations */
+static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ unsigned long flags;
+ u32 diff, addend;
+ bool neg_adj = false;
+ u32 gccr;
+
+ if (ppb < 0) {
+ neg_adj = true;
+ ppb = -ppb;
+ }
+ addend = priv->ptp.default_addend;
+ diff = div_u64((u64)addend * ppb, NSEC_PER_SEC);
+
+ addend = neg_adj ? addend - diff : addend + diff;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->ptp.current_addend = addend;
+
+ gccr = ravb_read(ndev, GCCR);
+ if (gccr & GCCR_LTI) {
+ /* Don't leak priv->lock when a previous load is still pending */
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return -EBUSY;
+ }
+ ravb_write(ndev, addend & GTI_TIV, GTI);
+ ravb_write(ndev, gccr | GCCR_LTI, GCCR);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
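+
+/* Worked example for the scaling above: a request of +50000 ppb
+ * (+50 ppm) against the default addend 8065969 gives
+ * diff = 8065969 * 50000 / 1000000000 = 403, so the new addend is
+ * 8066372 and the gPTP timer advances about 50 ppm faster.
+ */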
+
+static int ravb_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct timespec64 ts;
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_read(priv, &ts);
+ if (!error) {
+ u64 now = ktime_to_ns(timespec64_to_ktime(ts));
+
+ ts = ns_to_timespec64(now + delta);
+ error = ravb_ptp_time_write(priv, &ts);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_read(priv, ts);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ unsigned long flags;
+ int error;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ error = ravb_ptp_time_write(priv, ts);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_extts(struct ptp_clock_info *ptp,
+ struct ptp_extts_request *req, int on)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ unsigned long flags;
+ u32 gic;
+
+ if (req->index)
+ return -EINVAL;
+
+ if (priv->ptp.extts[req->index] == on)
+ return 0;
+ priv->ptp.extts[req->index] = on;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ gic = ravb_read(ndev, GIC);
+ if (on)
+ gic |= GIC_PTCE;
+ else
+ gic &= ~GIC_PTCE;
+ ravb_write(ndev, gic, GIC);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int ravb_ptp_perout(struct ptp_clock_info *ptp,
+ struct ptp_perout_request *req, int on)
+{
+ struct ravb_private *priv = container_of(ptp, struct ravb_private,
+ ptp.info);
+ struct net_device *ndev = priv->ndev;
+ struct ravb_ptp_perout *perout;
+ unsigned long flags;
+ int error = 0;
+ u32 gic;
+
+ if (req->index)
+ return -EINVAL;
+
+ if (on) {
+ u64 start_ns;
+ u64 period_ns;
+
+ start_ns = req->start.sec * NSEC_PER_SEC + req->start.nsec;
+ period_ns = req->period.sec * NSEC_PER_SEC + req->period.nsec;
+
+ if (start_ns > U32_MAX) {
+ netdev_warn(ndev,
+ "ptp: start value (nsec) is over limit. Maximum size of start is only 32 bits\n");
+ return -ERANGE;
+ }
+
+ if (period_ns > U32_MAX) {
+ netdev_warn(ndev,
+ "ptp: period value (nsec) is over limit. Maximum size of period is only 32 bits\n");
+ return -ERANGE;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ perout = &priv->ptp.perout[req->index];
+ perout->target = (u32)start_ns;
+ perout->period = (u32)period_ns;
+ error = ravb_ptp_update_compare(priv, (u32)start_ns);
+ if (!error) {
+ /* Unmask interrupt */
+ gic = ravb_read(ndev, GIC);
+ gic |= GIC_PTME;
+ ravb_write(ndev, gic, GIC);
+ }
+ } else {
+ spin_lock_irqsave(&priv->lock, flags);
+
+ perout = &priv->ptp.perout[req->index];
+ perout->period = 0;
+
+ /* Mask interrupt */
+ gic = ravb_read(ndev, GIC);
+ gic &= ~GIC_PTME;
+ ravb_write(ndev, gic, GIC);
+ }
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return error;
+}
+
+static int ravb_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ switch (req->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return ravb_ptp_extts(ptp, &req->extts, on);
+ case PTP_CLK_REQ_PEROUT:
+ return ravb_ptp_perout(ptp, &req->perout, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ptp_clock_info ravb_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "ravb clock",
+ .max_adj = 50000000,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = N_PER_OUT,
+ .adjfreq = ravb_ptp_adjfreq,
+ .adjtime = ravb_ptp_adjtime,
+ .gettime64 = ravb_ptp_gettime64,
+ .settime64 = ravb_ptp_settime64,
+ .enable = ravb_ptp_enable,
+};
+
+/* Caller must hold the lock */
+irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 gis = ravb_read(ndev, GIS);
+
+ gis &= ravb_read(ndev, GIC);
+ if (gis & GIS_PTCF) {
+ struct ptp_clock_event event;
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ravb_read(ndev, GCPT);
+ ptp_clock_event(priv->ptp.clock, &event);
+ }
+ if (gis & GIS_PTMF) {
+ struct ravb_ptp_perout *perout = priv->ptp.perout;
+
+ if (perout->period) {
+ perout->target += perout->period;
+ ravb_ptp_update_compare(priv, perout->target);
+ }
+ }
+
+ if (gis) {
+ ravb_write(ndev, ~gis, GIS);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ u32 gccr;
+
+ priv->ptp.info = ravb_ptp_info;
+
+ priv->ptp.default_addend = ravb_read(ndev, GTI);
+ priv->ptp.current_addend = priv->ptp.default_addend;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
+ gccr = ravb_read(ndev, GCCR) & ~GCCR_TCSS;
+ ravb_write(ndev, gccr | GCCR_TCSS_ADJGPTP, GCCR);
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ priv->ptp.clock = ptp_clock_register(&priv->ptp.info, &pdev->dev);
+}
+
+void ravb_ptp_stop(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ravb_write(ndev, 0, GIC);
+ ravb_write(ndev, 0, GIS);
+
+ ptp_clock_unregister(priv->ptp.clock);
+}
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index cf98cc9bbc8d..819289e5be4f 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -181,7 +181,7 @@ struct rocker_desc_info {
size_t data_size;
size_t tlv_size;
struct rocker_desc *desc;
- DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ dma_addr_t mapaddr;
};
struct rocker_dma_ring_info {
@@ -225,6 +225,7 @@ struct rocker_port {
struct napi_struct napi_rx;
struct rocker_dma_ring_info tx_ring;
struct rocker_dma_ring_info rx_ring;
+ struct list_head trans_mem;
};
struct rocker {
@@ -236,21 +237,21 @@ struct rocker {
struct {
u64 id;
} hw;
- spinlock_t cmd_ring_lock;
+ spinlock_t cmd_ring_lock; /* for cmd ring accesses */
struct rocker_dma_ring_info cmd_ring;
struct rocker_dma_ring_info event_ring;
DECLARE_HASHTABLE(flow_tbl, 16);
- spinlock_t flow_tbl_lock;
+ spinlock_t flow_tbl_lock; /* for flow tbl accesses */
u64 flow_tbl_next_cookie;
DECLARE_HASHTABLE(group_tbl, 16);
- spinlock_t group_tbl_lock;
+ spinlock_t group_tbl_lock; /* for group tbl accesses */
DECLARE_HASHTABLE(fdb_tbl, 16);
- spinlock_t fdb_tbl_lock;
+ spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
DECLARE_HASHTABLE(internal_vlan_tbl, 8);
- spinlock_t internal_vlan_tbl_lock;
+ spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */
DECLARE_HASHTABLE(neigh_tbl, 16);
- spinlock_t neigh_tbl_lock;
+ spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
u32 neigh_tbl_next_index;
};
@@ -294,7 +295,7 @@ static bool rocker_vlan_id_is_internal(__be16 vlan_id)
return (_vlan_id >= start && _vlan_id <= end);
}
-static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
+static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
u16 vid, bool *pop_vlan)
{
__be16 vlan_id;
@@ -311,7 +312,7 @@ static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
return vlan_id;
}
-static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
+static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
__be16 vlan_id)
{
if (rocker_vlan_id_is_internal(vlan_id))
@@ -320,21 +321,87 @@ static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
return ntohs(vlan_id);
}
-static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
+static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
return !!rocker_port->bridge_dev;
}
+static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, size_t size)
+{
+ struct list_head *elem = NULL;
+
+ /* If in transaction prepare phase, allocate the memory
+ * and enqueue it on a per-port list. If in transaction
+ * commit phase, dequeue the memory from the per-port list
+ * rather than re-allocating the memory. The idea is the
+ * driver code paths for prepare and commit are identical
+ * so the memory allocated in the prepare phase is the
+ * memory used in the commit phase.
+ */
+
+ switch (trans) {
+ case SWITCHDEV_TRANS_PREPARE:
+ elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
+ if (!elem)
+ return NULL;
+ list_add_tail(elem, &rocker_port->trans_mem);
+ break;
+ case SWITCHDEV_TRANS_COMMIT:
+ BUG_ON(list_empty(&rocker_port->trans_mem));
+ elem = rocker_port->trans_mem.next;
+ list_del_init(elem);
+ break;
+ case SWITCHDEV_TRANS_NONE:
+ elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
+ if (elem)
+ INIT_LIST_HEAD(elem);
+ break;
+ default:
+ break;
+ }
+
+ return elem ? elem + 1 : NULL;
+}
+
+static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, size_t size)
+{
+ return __rocker_port_mem_alloc(rocker_port, trans, size);
+}
+
+static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, size_t n,
+ size_t size)
+{
+ return __rocker_port_mem_alloc(rocker_port, trans, n * size);
+}
+
+static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
+{
+ struct list_head *elem;
+
+ /* Frees are ignored if in transaction prepare phase. The
+ * memory remains on the per-port list until freed in the
+ * commit phase.
+ */
+
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ return;
+
+ elem = (struct list_head *)mem - 1;
+ BUG_ON(!list_empty(elem));
+ kfree(elem);
+}
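+
+/* A sketch (hypothetical caller, not rocker code) of the pattern these
+ * helpers enable: the same function body runs once per transaction
+ * phase, so the prepare phase pre-allocates onto rocker_port->trans_mem
+ * and the commit phase consumes that memory and cannot fail on
+ * allocation:
+ *
+ *	static int foo_do(struct rocker_port *rocker_port,
+ *			  enum switchdev_trans trans)
+ *	{
+ *		struct foo_entry *entry;
+ *
+ *		entry = rocker_port_kzalloc(rocker_port, trans,
+ *					    sizeof(*entry));
+ *		if (!entry)
+ *			return -ENOMEM;		(prepare/none phase only)
+ *		... program the entry ...
+ *		rocker_port_kfree(trans, entry);  (no-op during prepare)
+ *		return 0;
+ *	}
+ *
+ *	err = foo_do(rocker_port, SWITCHDEV_TRANS_PREPARE);
+ *	if (!err)
+ *		err = foo_do(rocker_port, SWITCHDEV_TRANS_COMMIT);
+ */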
+
struct rocker_wait {
wait_queue_head_t wait;
bool done;
- bool nowait;
};
static void rocker_wait_reset(struct rocker_wait *wait)
{
wait->done = false;
- wait->nowait = false;
}
static void rocker_wait_init(struct rocker_wait *wait)
@@ -343,20 +410,22 @@ static void rocker_wait_init(struct rocker_wait *wait)
rocker_wait_reset(wait);
}
-static struct rocker_wait *rocker_wait_create(gfp_t gfp)
+static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
struct rocker_wait *wait;
- wait = kmalloc(sizeof(*wait), gfp);
+ wait = rocker_port_kzalloc(rocker_port, trans, sizeof(*wait));
if (!wait)
return NULL;
rocker_wait_init(wait);
return wait;
}
-static void rocker_wait_destroy(struct rocker_wait *work)
+static void rocker_wait_destroy(enum switchdev_trans trans,
+ struct rocker_wait *wait)
{
- kfree(work);
+ rocker_port_kfree(trans, wait);
}
static bool rocker_wait_event_timeout(struct rocker_wait *wait,
@@ -374,18 +443,18 @@ static void rocker_wait_wake_up(struct rocker_wait *wait)
wake_up(&wait->wait);
}
-static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
+static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
return rocker->msix_entries[vector].vector;
}
-static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
return rocker_msix_vector(rocker_port->rocker,
ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}
-static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
return rocker_msix_vector(rocker_port->rocker,
ROCKER_MSIX_VEC_RX(rocker_port->port_number));
@@ -404,9 +473,9 @@ static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
* HW basic testing functions
*****************************/
-static int rocker_reg_test(struct rocker *rocker)
+static int rocker_reg_test(const struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
u64 test_reg;
u64 rnd;
@@ -434,12 +503,12 @@ static int rocker_reg_test(struct rocker *rocker)
return 0;
}
-static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
- u32 test_type, dma_addr_t dma_handle,
- unsigned char *buf, unsigned char *expect,
- size_t size)
+static int rocker_dma_test_one(const struct rocker *rocker,
+ struct rocker_wait *wait, u32 test_type,
+ dma_addr_t dma_handle, const unsigned char *buf,
+ const unsigned char *expect, size_t size)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
int i;
rocker_wait_reset(wait);
@@ -463,7 +532,7 @@ static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
-static int rocker_dma_test_offset(struct rocker *rocker,
+static int rocker_dma_test_offset(const struct rocker *rocker,
struct rocker_wait *wait, int offset)
{
struct pci_dev *pdev = rocker->pdev;
@@ -523,7 +592,8 @@ free_alloc:
return err;
}
-static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
+static int rocker_dma_test(const struct rocker *rocker,
+ struct rocker_wait *wait)
{
int i;
int err;
@@ -545,9 +615,9 @@ static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int rocker_basic_hw_test(struct rocker *rocker)
+static int rocker_basic_hw_test(const struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
struct rocker_wait wait;
int err;
@@ -680,7 +750,7 @@ static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
return *(u64 *) rocker_tlv_data(tlv);
}
-static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
const char *buf, int buf_len)
{
const struct rocker_tlv *tlv;
@@ -693,19 +763,19 @@ static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
u32 type = rocker_tlv_type(tlv);
if (type > 0 && type <= maxtype)
- tb[type] = (struct rocker_tlv *) tlv;
+ tb[type] = tlv;
}
}
-static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
const struct rocker_tlv *tlv)
{
rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
rocker_tlv_len(tlv));
}
-static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
- struct rocker_desc_info *desc_info)
+static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
+ const struct rocker_desc_info *desc_info)
{
rocker_tlv_parse(tb, maxtype, desc_info->data,
desc_info->desc->tlv_size);
@@ -790,9 +860,9 @@ static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
}
static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
- struct rocker_tlv *start)
+ const struct rocker_tlv *start)
{
- desc_info->tlv_size = (char *) start - desc_info->data;
+ desc_info->tlv_size = (const char *) start - desc_info->data;
}
/******************************************
@@ -804,7 +874,7 @@ static u32 __pos_inc(u32 pos, size_t limit)
return ++pos == limit ? 0 : pos;
}
-static int rocker_desc_err(struct rocker_desc_info *desc_info)
+static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
@@ -832,31 +902,31 @@ static int rocker_desc_err(struct rocker_desc_info *desc_info)
return -EINVAL;
}
-static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
+static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}
-static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
+static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
u32 comp_err = desc_info->desc->comp_err;
return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}
-static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
+static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
return (void *)(uintptr_t)desc_info->desc->cookie;
}
-static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
+static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
void *ptr)
{
desc_info->desc->cookie = (uintptr_t) ptr;
}
static struct rocker_desc_info *
-rocker_desc_head_get(struct rocker_dma_ring_info *info)
+rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
static struct rocker_desc_info *desc_info;
u32 head = __pos_inc(info->head, info->size);
@@ -868,15 +938,15 @@ rocker_desc_head_get(struct rocker_dma_ring_info *info)
return desc_info;
}
-static void rocker_desc_commit(struct rocker_desc_info *desc_info)
+static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
desc_info->desc->buf_size = desc_info->data_size;
desc_info->desc->tlv_size = desc_info->tlv_size;
}
-static void rocker_desc_head_set(struct rocker *rocker,
+static void rocker_desc_head_set(const struct rocker *rocker,
struct rocker_dma_ring_info *info,
- struct rocker_desc_info *desc_info)
+ const struct rocker_desc_info *desc_info)
{
u32 head = __pos_inc(info->head, info->size);
@@ -901,8 +971,8 @@ rocker_desc_tail_get(struct rocker_dma_ring_info *info)
return desc_info;
}
-static void rocker_dma_ring_credits_set(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_credits_set(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
u32 credits)
{
if (credits)
@@ -915,7 +985,7 @@ static unsigned long rocker_dma_ring_size_fix(size_t size)
min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
-static int rocker_dma_ring_create(struct rocker *rocker,
+static int rocker_dma_ring_create(const struct rocker *rocker,
unsigned int type,
size_t size,
struct rocker_dma_ring_info *info)
@@ -951,8 +1021,8 @@ static int rocker_dma_ring_create(struct rocker *rocker,
return 0;
}
-static void rocker_dma_ring_destroy(struct rocker *rocker,
- struct rocker_dma_ring_info *info)
+static void rocker_dma_ring_destroy(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info)
{
rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
@@ -962,7 +1032,7 @@ static void rocker_dma_ring_destroy(struct rocker *rocker,
kfree(info->desc_info);
}
-static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
+static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
struct rocker_dma_ring_info *info)
{
int i;
@@ -977,8 +1047,8 @@ static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
rocker_desc_commit(&info->desc_info[i]);
}
-static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
int direction, size_t buf_size)
{
struct pci_dev *pdev = rocker->pdev;
@@ -1015,7 +1085,7 @@ static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
rollback:
for (i--; i >= 0; i--) {
- struct rocker_desc_info *desc_info = &info->desc_info[i];
+ const struct rocker_desc_info *desc_info = &info->desc_info[i];
pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
desc_info->data_size, direction);
@@ -1024,15 +1094,15 @@ rollback:
return err;
}
-static void rocker_dma_ring_bufs_free(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
int direction)
{
struct pci_dev *pdev = rocker->pdev;
int i;
for (i = 0; i < info->size; i++) {
- struct rocker_desc_info *desc_info = &info->desc_info[i];
+ const struct rocker_desc_info *desc_info = &info->desc_info[i];
struct rocker_desc *desc = &info->desc[i];
desc->buf_addr = 0;
@@ -1045,7 +1115,7 @@ static void rocker_dma_ring_bufs_free(struct rocker *rocker,
static int rocker_dma_rings_init(struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
int err;
err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
@@ -1102,11 +1172,11 @@ static void rocker_dma_rings_fini(struct rocker *rocker)
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
-static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
struct sk_buff *skb, size_t buf_len)
{
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
@@ -1126,13 +1196,12 @@ tlv_put_failure:
return -EMSGSIZE;
}
-static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
+static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
-static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info)
{
struct net_device *dev = rocker_port->dev;
@@ -1149,8 +1218,7 @@ static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
skb = netdev_alloc_skb_ip_align(dev, buf_len);
if (!skb)
return -ENOMEM;
- err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
- skb, buf_len);
+ err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
if (err) {
dev_kfree_skb_any(skb);
return err;
@@ -1159,8 +1227,8 @@ static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
return 0;
}
-static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
- struct rocker_tlv **attrs)
+static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
+ const struct rocker_tlv **attrs)
{
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
@@ -1174,10 +1242,10 @@ static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}
-static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
- struct rocker_desc_info *desc_info)
+static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
if (!skb)
@@ -1187,15 +1255,15 @@ static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
dev_kfree_skb_any(skb);
}
-static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
- struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker *rocker = rocker_port->rocker;
int i;
int err;
for (i = 0; i < rx_ring->size; i++) {
- err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
+ err = rocker_dma_rx_ring_skb_alloc(rocker_port,
&rx_ring->desc_info[i]);
if (err)
goto rollback;
@@ -1208,10 +1276,10 @@ rollback:
return err;
}
-static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
- struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker *rocker = rocker_port->rocker;
int i;
for (i = 0; i < rx_ring->size; i++)
@@ -1257,7 +1325,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
goto err_dma_rx_ring_bufs_alloc;
}
- err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
+ err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
if (err) {
netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
goto err_dma_rx_ring_skbs_alloc;
@@ -1283,7 +1351,7 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
struct rocker *rocker = rocker_port->rocker;
- rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
+ rocker_dma_rx_ring_skbs_free(rocker_port);
rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
PCI_DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
@@ -1292,7 +1360,8 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
-static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
+static void rocker_port_set_enable(const struct rocker_port *rocker_port,
+ bool enable)
{
u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
@@ -1310,19 +1379,14 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
struct rocker *rocker = dev_id;
- struct rocker_desc_info *desc_info;
+ const struct rocker_desc_info *desc_info;
struct rocker_wait *wait;
u32 credits = 0;
spin_lock(&rocker->cmd_ring_lock);
while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
wait = rocker_desc_cookie_ptr_get(desc_info);
- if (wait->nowait) {
- rocker_desc_gen_clear(desc_info);
- rocker_wait_destroy(wait);
- } else {
- rocker_wait_wake_up(wait);
- }
+ rocker_wait_wake_up(wait);
credits++;
}
spin_unlock(&rocker->cmd_ring_lock);
@@ -1331,22 +1395,22 @@ static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rocker_port_link_up(struct rocker_port *rocker_port)
+static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
netif_carrier_on(rocker_port->dev);
netdev_info(rocker_port->dev, "Link is up\n");
}
-static void rocker_port_link_down(struct rocker_port *rocker_port)
+static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
netif_carrier_off(rocker_port->dev);
netdev_info(rocker_port->dev, "Link is down\n");
}
-static int rocker_event_link_change(struct rocker *rocker,
+static int rocker_event_link_change(const struct rocker *rocker,
const struct rocker_tlv *info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
unsigned int port_number;
bool link_up;
struct rocker_port *rocker_port;
@@ -1374,22 +1438,44 @@ static int rocker_event_link_change(struct rocker *rocker,
}
#define ROCKER_OP_FLAG_REMOVE BIT(0)
-#define ROCKER_OP_FLAG_NOWAIT BIT(1)
-#define ROCKER_OP_FLAG_LEARNED BIT(2)
-#define ROCKER_OP_FLAG_REFRESH BIT(3)
+#define ROCKER_OP_FLAG_LEARNED BIT(1)
+#define ROCKER_OP_FLAG_REFRESH BIT(2)
static int rocker_port_fdb(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
const unsigned char *addr,
__be16 vlan_id, int flags);
-static int rocker_event_mac_vlan_seen(struct rocker *rocker,
+struct rocker_mac_vlan_seen_work {
+ struct work_struct work;
+ struct rocker_port *rocker_port;
+ int flags;
+ unsigned char addr[ETH_ALEN];
+ __be16 vlan_id;
+};
+
+static void rocker_event_mac_vlan_seen_work(struct work_struct *work)
+{
+ const struct rocker_mac_vlan_seen_work *sw =
+ container_of(work, struct rocker_mac_vlan_seen_work, work);
+
+ rtnl_lock();
+ rocker_port_fdb(sw->rocker_port, SWITCHDEV_TRANS_NONE,
+ sw->addr, sw->vlan_id, sw->flags);
+ rtnl_unlock();
+
+ kfree(work);
+}
+
+static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
const struct rocker_tlv *info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
+ struct rocker_mac_vlan_seen_work *sw;
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
unsigned int port_number;
struct rocker_port *rocker_port;
- unsigned char *addr;
- int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
+ const unsigned char *addr;
+ int flags = ROCKER_OP_FLAG_LEARNED;
__be16 vlan_id;
rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
@@ -1411,14 +1497,27 @@ static int rocker_event_mac_vlan_seen(struct rocker *rocker,
rocker_port->stp_state != BR_STATE_FORWARDING)
return 0;
- return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+ sw = kmalloc(sizeof(*sw), GFP_ATOMIC);
+ if (!sw)
+ return -ENOMEM;
+
+ INIT_WORK(&sw->work, rocker_event_mac_vlan_seen_work);
+
+ sw->rocker_port = rocker_port;
+ sw->flags = flags;
+ ether_addr_copy(sw->addr, addr);
+ sw->vlan_id = vlan_id;
+
+ schedule_work(&sw->work);
+
+ return 0;
}
-static int rocker_event_process(struct rocker *rocker,
- struct rocker_desc_info *desc_info)
+static int rocker_event_process(const struct rocker *rocker,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
- struct rocker_tlv *info;
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
+ const struct rocker_tlv *info;
u16 type;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
@@ -1442,8 +1541,8 @@ static int rocker_event_process(struct rocker *rocker,
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
struct rocker *rocker = dev_id;
- struct pci_dev *pdev = rocker->pdev;
- struct rocker_desc_info *desc_info;
+ const struct pci_dev *pdev = rocker->pdev;
+ const struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -1487,65 +1586,70 @@ static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
* Command interface
********************/
-typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv);
+typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv);
+
+typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
+ void *priv);
-static int rocker_cmd_exec(struct rocker *rocker,
- struct rocker_port *rocker_port,
- rocker_cmd_cb_t prepare, void *prepare_priv,
- rocker_cmd_cb_t process, void *process_priv,
- bool nowait)
+static int rocker_cmd_exec(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ rocker_cmd_prep_cb_t prepare, void *prepare_priv,
+ rocker_cmd_proc_cb_t process, void *process_priv)
{
+ struct rocker *rocker = rocker_port->rocker;
struct rocker_desc_info *desc_info;
struct rocker_wait *wait;
unsigned long flags;
int err;
- wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
+ wait = rocker_wait_create(rocker_port, trans);
if (!wait)
return -ENOMEM;
- wait->nowait = nowait;
spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
+
desc_info = rocker_desc_head_get(&rocker->cmd_ring);
if (!desc_info) {
spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
err = -EAGAIN;
goto out;
}
- err = prepare(rocker, rocker_port, desc_info, prepare_priv);
+
+ err = prepare(rocker_port, desc_info, prepare_priv);
if (err) {
spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
goto out;
}
+
rocker_desc_cookie_ptr_set(desc_info, wait);
- rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
- spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
- if (nowait)
- return 0;
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
- if (!rocker_wait_event_timeout(wait, HZ / 10))
- return -EIO;
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ if (!rocker_wait_event_timeout(wait, HZ / 10))
+ return -EIO;
err = rocker_desc_err(desc_info);
if (err)
return err;
if (process)
- err = process(rocker, rocker_port, desc_info, process_priv);
+ err = process(rocker_port, desc_info, process_priv);
rocker_desc_gen_clear(desc_info);
out:
- rocker_wait_destroy(wait);
+ rocker_wait_destroy(trans, wait);
return err;
}
static int
-rocker_cmd_get_port_settings_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -1565,14 +1669,13 @@ rocker_cmd_get_port_settings_prep(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
struct ethtool_cmd *ecmd = priv;
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
u32 speed;
u8 duplex;
u8 autoneg;
@@ -1604,15 +1707,14 @@ rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
unsigned char *macaddr = priv;
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
- struct rocker_tlv *attr;
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attr;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1637,17 +1739,16 @@ struct port_name {
};
static int
-rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
struct port_name *name = priv;
- struct rocker_tlv *attr;
+ const struct rocker_tlv *attr;
size_t i, j, len;
- char *str;
+ const char *str;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1679,8 +1780,7 @@ rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -1710,12 +1810,11 @@ rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
- unsigned char *macaddr = priv;
+ const unsigned char *macaddr = priv;
struct rocker_tlv *cmd_info;
if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
@@ -1735,8 +1834,7 @@ rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_learning_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -1761,46 +1859,48 @@ rocker_cmd_set_port_learning_prep(struct rocker *rocker,
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_ethtool_proc,
- ecmd, false);
+ ecmd);
}
static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_macaddr_proc,
- macaddr, false);
+ macaddr);
}
static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_set_port_settings_ethtool_prep,
- ecmd, NULL, NULL, false);
+ ecmd, NULL, NULL);
}
static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_set_port_settings_macaddr_prep,
- macaddr, NULL, NULL, false);
+ macaddr, NULL, NULL);
}
-static int rocker_port_set_learning(struct rocker_port *rocker_port)
+static int rocker_port_set_learning(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, trans,
rocker_cmd_set_port_learning_prep,
- NULL, NULL, NULL, false);
+ NULL, NULL, NULL);
}
-static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.ig_port.in_pport))
@@ -1815,8 +1915,9 @@ static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.vlan.in_pport))
@@ -1838,8 +1939,9 @@ static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.term_mac.in_pport))
@@ -1875,7 +1977,7 @@ static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
entry->key.ucast_routing.eth_type))
@@ -1896,8 +1998,9 @@ rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (entry->key.bridge.has_eth_dst &&
rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
@@ -1929,8 +2032,9 @@ static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.acl.in_pport))
@@ -1995,12 +2099,11 @@ static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_flow_tbl_entry *entry = priv;
+ const struct rocker_flow_tbl_entry *entry = priv;
struct rocker_tlv *cmd_info;
int err = 0;
@@ -2053,8 +2156,7 @@ static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
return 0;
}
-static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2090,7 +2192,7 @@ rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
entry->l2_rewrite.group_id))
@@ -2113,7 +2215,7 @@ rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
int i;
struct rocker_tlv *group_ids;
@@ -2139,7 +2241,7 @@ rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
@@ -2163,8 +2265,7 @@ rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_group_tbl_add(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2209,8 +2310,7 @@ static int rocker_cmd_group_tbl_add(struct rocker *rocker,
return 0;
}
-static int rocker_cmd_group_tbl_del(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2293,7 +2393,8 @@ static void rocker_free_tbls(struct rocker *rocker)
}
static struct rocker_flow_tbl_entry *
-rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
+rocker_flow_tbl_find(const struct rocker *rocker,
+ const struct rocker_flow_tbl_entry *match)
{
struct rocker_flow_tbl_entry *found;
size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
@@ -2308,8 +2409,8 @@ rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
}
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
- struct rocker_flow_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans,
+ struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_flow_tbl_entry *found;
@@ -2324,8 +2425,9 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
if (found) {
match->cookie = found->cookie;
- hash_del(&found->entry);
- kfree(found);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
+ rocker_port_kfree(trans, found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
} else {
@@ -2334,18 +2436,18 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
}
- hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
- return rocker_cmd_exec(rocker, rocker_port,
- rocker_cmd_flow_tbl_add,
- found, NULL, NULL, nowait);
+ return rocker_cmd_exec(rocker_port, trans, rocker_cmd_flow_tbl_add,
+ found, NULL, NULL);
}
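
Every mutation of the shared flow hash is now guarded by trans != SWITCHDEV_TRANS_PREPARE, so a prepare pass validates the entry without publishing it, and an aborted transaction leaves the table untouched. A self-contained sketch of that guard (a singly linked list standing in for the kernel hash table, locking elided):

#include <stdbool.h>
#include <stddef.h>

enum switchdev_trans { SWITCHDEV_TRANS_NONE, SWITCHDEV_TRANS_PREPARE };

struct entry { struct entry *next; int key; };

static struct entry *flow_tbl;			/* shared state */

static struct entry *tbl_find(int key)
{
	struct entry *e;

	for (e = flow_tbl; e; e = e->next)
		if (e->key == key)
			return e;
	return NULL;
}

static int flow_tbl_add(struct entry *match, enum switchdev_trans trans)
{
	struct entry *found = tbl_find(match->key);

	if (found && trans != SWITCHDEV_TRANS_PREPARE) {
		struct entry **pp;		/* modify: unlink stale entry */

		for (pp = &flow_tbl; *pp != found; pp = &(*pp)->next)
			;
		*pp = found->next;
	}
	if (trans != SWITCHDEV_TRANS_PREPARE) {	/* visible only on commit */
		match->next = flow_tbl;
		flow_tbl = match;
	}
	return 0;				/* hw programming elided */
}
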
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
- struct rocker_flow_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans,
+ struct rocker_flow_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_flow_tbl_entry *found;
@@ -2360,47 +2462,43 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
found = rocker_flow_tbl_find(rocker, match);
if (found) {
- hash_del(&found->entry);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
}
spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
- kfree(match);
+ rocker_port_kfree(trans, match);
if (found) {
- err = rocker_cmd_exec(rocker, rocker_port,
+ err = rocker_cmd_exec(rocker_port, trans,
rocker_cmd_flow_tbl_del,
- found, NULL, NULL, nowait);
- kfree(found);
+ found, NULL, NULL);
+ rocker_port_kfree(trans, found);
}
return err;
}
-static gfp_t rocker_op_flags_gfp(int flags)
-{
- return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
-}
-
static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
- int flags, struct rocker_flow_tbl_entry *entry)
+ enum switchdev_trans trans, int flags,
+ struct rocker_flow_tbl_entry *entry)
{
- bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
-
if (flags & ROCKER_OP_FLAG_REMOVE)
- return rocker_flow_tbl_del(rocker_port, entry, nowait);
+ return rocker_flow_tbl_del(rocker_port, trans, entry);
else
- return rocker_flow_tbl_add(rocker_port, entry, nowait);
+ return rocker_flow_tbl_add(rocker_port, trans, entry);
}
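
With the NOWAIT flag gone every caller can sleep, so rocker_op_flags_gfp() (GFP_ATOMIC vs GFP_KERNEL) is deleted outright; allocations now go through rocker_port_kzalloc()/rocker_port_kfree(), helpers this patch introduces earlier in the file. A hedged userspace model of their prepare-phase behavior (simplified: the real helpers also hand prepare-staged blocks back to the commit pass, and the names here are stand-ins):

#include <stdlib.h>

enum switchdev_trans { SWITCHDEV_TRANS_NONE, SWITCHDEV_TRANS_PREPARE };

struct trans_mem { struct trans_mem *next; };
struct rocker_port { struct trans_mem *trans_mem; };

/* Zeroing allocator: during PREPARE the block is threaded onto the
 * port's transaction list so a later abort can reclaim everything. */
static void *rocker_port_kzalloc(struct rocker_port *port,
				 enum switchdev_trans trans, size_t size)
{
	struct trans_mem *m = calloc(1, sizeof(*m) + size);

	if (!m)
		return NULL;
	if (trans == SWITCHDEV_TRANS_PREPARE) {
		m->next = port->trans_mem;
		port->trans_mem = m;
	}
	return m + 1;				/* payload after header */
}

/* Frees are ignored during PREPARE: the block stays on the list and
 * is reclaimed by the commit pass or by the abort sweep. */
static void rocker_port_kfree(enum switchdev_trans trans, void *mem)
{
	if (trans == SWITCHDEV_TRANS_PREPARE)
		return;
	free((struct trans_mem *)mem - 1);
}
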
static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
- int flags, u32 in_pport, u32 in_pport_mask,
+ enum switchdev_trans trans, int flags,
+ u32 in_pport, u32 in_pport_mask,
enum rocker_of_dpa_table_id goto_tbl)
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2410,18 +2508,19 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
entry->key.ig_port.in_pport_mask = in_pport_mask;
entry->key.ig_port.goto_tbl = goto_tbl;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
- int flags, u32 in_pport,
- __be16 vlan_id, __be16 vlan_id_mask,
+ enum switchdev_trans trans, int flags,
+ u32 in_pport, __be16 vlan_id,
+ __be16 vlan_id_mask,
enum rocker_of_dpa_table_id goto_tbl,
bool untagged, __be16 new_vlan_id)
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2435,10 +2534,11 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
entry->key.vlan.untagged = untagged;
entry->key.vlan.new_vlan_id = new_vlan_id;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
u32 in_pport, u32 in_pport_mask,
__be16 eth_type, const u8 *eth_dst,
const u8 *eth_dst_mask, __be16 vlan_id,
@@ -2447,7 +2547,7 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2471,11 +2571,11 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
entry->key.term_mac.vlan_id_mask = vlan_id_mask;
entry->key.term_mac.copy_to_cpu = copy_to_cpu;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
- int flags,
+ enum switchdev_trans trans, int flags,
const u8 *eth_dst, const u8 *eth_dst_mask,
__be16 vlan_id, u32 tunnel_id,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2487,7 +2587,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
bool wild = false;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2500,7 +2600,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
if (eth_dst_mask) {
entry->key.bridge.has_eth_dst_mask = 1;
ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
- if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
+ if (!ether_addr_equal(eth_dst_mask, ff_mac))
wild = true;
}
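
The memcmp(eth_dst_mask, ff_mac, ETH_ALEN) test flips into !ether_addr_equal(eth_dst_mask, ff_mac); the same substitution appears again in the ACL hunk below, where memcmp(...) == 0 becomes ether_addr_equal(...). For reference, a byte-wise model of the helper (the kernel version is typically an optimized 32+16-bit compare):

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

/* Model of the kernel helper: true iff the two 6-byte MACs match. */
static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

memcmp() returns nonzero on any difference, so both forms take the same branch; the helper just states the intent.
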
@@ -2525,10 +2625,11 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
entry->key.bridge.group_id = group_id;
entry->key.bridge.copy_to_cpu = copy_to_cpu;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
__be16 eth_type, __be32 dst,
__be32 dst_mask, u32 priority,
enum rocker_of_dpa_table_id goto_tbl,
@@ -2536,7 +2637,7 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
{
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2550,30 +2651,29 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
entry->key_len = offsetof(struct rocker_flow_tbl_key,
ucast_routing.group_id);
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
- int flags, u32 in_pport,
- u32 in_pport_mask,
+ enum switchdev_trans trans, int flags,
+ u32 in_pport, u32 in_pport_mask,
const u8 *eth_src, const u8 *eth_src_mask,
const u8 *eth_dst, const u8 *eth_dst_mask,
- __be16 eth_type,
- __be16 vlan_id, __be16 vlan_id_mask,
- u8 ip_proto, u8 ip_proto_mask,
- u8 ip_tos, u8 ip_tos_mask,
+ __be16 eth_type, __be16 vlan_id,
+ __be16 vlan_id_mask, u8 ip_proto,
+ u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
u32 group_id)
{
u32 priority;
struct rocker_flow_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
priority = ROCKER_PRIORITY_ACL_NORMAL;
if (eth_dst && eth_dst_mask) {
- if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
+ if (ether_addr_equal(eth_dst_mask, mcast_mac))
priority = ROCKER_PRIORITY_ACL_DFLT;
else if (is_link_local_ether_addr(eth_dst))
priority = ROCKER_PRIORITY_ACL_CTRL;
@@ -2602,12 +2702,12 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
entry->key.acl.ip_tos_mask = ip_tos_mask;
entry->key.acl.group_id = group_id;
- return rocker_flow_tbl_do(rocker_port, flags, entry);
+ return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
}
static struct rocker_group_tbl_entry *
-rocker_group_tbl_find(struct rocker *rocker,
- struct rocker_group_tbl_entry *match)
+rocker_group_tbl_find(const struct rocker *rocker,
+ const struct rocker_group_tbl_entry *match)
{
struct rocker_group_tbl_entry *found;
@@ -2620,22 +2720,23 @@ rocker_group_tbl_find(struct rocker *rocker,
return NULL;
}
-static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
+static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
+ struct rocker_group_tbl_entry *entry)
{
switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
- kfree(entry->group_ids);
+ rocker_port_kfree(trans, entry->group_ids);
break;
default:
break;
}
- kfree(entry);
+ rocker_port_kfree(trans, entry);
}
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
- struct rocker_group_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans,
+ struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_group_tbl_entry *found;
@@ -2646,8 +2747,9 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
found = rocker_group_tbl_find(rocker, match);
if (found) {
- hash_del(&found->entry);
- rocker_group_tbl_entry_free(found);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
+ rocker_group_tbl_entry_free(trans, found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
} else {
@@ -2655,18 +2757,18 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
}
- hash_add(rocker->group_tbl, &found->entry, found->group_id);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_add(rocker->group_tbl, &found->entry, found->group_id);
spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
- return rocker_cmd_exec(rocker, rocker_port,
- rocker_cmd_group_tbl_add,
- found, NULL, NULL, nowait);
+ return rocker_cmd_exec(rocker_port, trans, rocker_cmd_group_tbl_add,
+ found, NULL, NULL);
}
static int rocker_group_tbl_del(struct rocker_port *rocker_port,
- struct rocker_group_tbl_entry *match,
- bool nowait)
+ enum switchdev_trans trans,
+ struct rocker_group_tbl_entry *match)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_group_tbl_entry *found;
@@ -2678,93 +2780,95 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port,
found = rocker_group_tbl_find(rocker, match);
if (found) {
- hash_del(&found->entry);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
}
spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
- rocker_group_tbl_entry_free(match);
+ rocker_group_tbl_entry_free(trans, match);
if (found) {
- err = rocker_cmd_exec(rocker, rocker_port,
+ err = rocker_cmd_exec(rocker_port, trans,
rocker_cmd_group_tbl_del,
- found, NULL, NULL, nowait);
- rocker_group_tbl_entry_free(found);
+ found, NULL, NULL);
+ rocker_group_tbl_entry_free(trans, found);
}
return err;
}
static int rocker_group_tbl_do(struct rocker_port *rocker_port,
- int flags, struct rocker_group_tbl_entry *entry)
+ enum switchdev_trans trans, int flags,
+ struct rocker_group_tbl_entry *entry)
{
- bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
-
if (flags & ROCKER_OP_FLAG_REMOVE)
- return rocker_group_tbl_del(rocker_port, entry, nowait);
+ return rocker_group_tbl_del(rocker_port, trans, entry);
else
- return rocker_group_tbl_add(rocker_port, entry, nowait);
+ return rocker_group_tbl_add(rocker_port, trans, entry);
}
static int rocker_group_l2_interface(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id,
- u32 out_pport, int pop_vlan)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id, u32 out_pport,
+ int pop_vlan)
{
struct rocker_group_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
entry->l2_interface.pop_vlan = pop_vlan;
- return rocker_group_tbl_do(rocker_port, flags, entry);
+ return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
int flags, u8 group_count,
- u32 *group_ids, u32 group_id)
+ const u32 *group_ids, u32 group_id)
{
struct rocker_group_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
entry->group_id = group_id;
entry->group_count = group_count;
- entry->group_ids = kcalloc(group_count, sizeof(u32),
- rocker_op_flags_gfp(flags));
+ entry->group_ids = rocker_port_kcalloc(rocker_port, trans, group_count,
+ sizeof(u32));
if (!entry->group_ids) {
- kfree(entry);
+ rocker_port_kfree(trans, entry);
return -ENOMEM;
}
memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
- return rocker_group_tbl_do(rocker_port, flags, entry);
+ return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}
static int rocker_group_l2_flood(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id,
- u8 group_count, u32 *group_ids,
- u32 group_id)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id, u8 group_count,
+ const u32 *group_ids, u32 group_id)
{
- return rocker_group_l2_fan_out(rocker_port, flags,
+ return rocker_group_l2_fan_out(rocker_port, trans, flags,
group_count, group_ids,
group_id);
}
static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
- int flags, u32 index, u8 *src_mac,
- u8 *dst_mac, __be16 vlan_id,
- bool ttl_check, u32 pport)
+ enum switchdev_trans trans, int flags,
+ u32 index, const u8 *src_mac, const u8 *dst_mac,
+ __be16 vlan_id, bool ttl_check, u32 pport)
{
struct rocker_group_tbl_entry *entry;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2777,11 +2881,11 @@ static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
entry->l3_unicast.ttl_check = ttl_check;
entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
- return rocker_group_tbl_do(rocker_port, flags, entry);
+ return rocker_group_tbl_do(rocker_port, trans, flags, entry);
}
static struct rocker_neigh_tbl_entry *
- rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
+rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
{
struct rocker_neigh_tbl_entry *found;
@@ -2794,37 +2898,44 @@ static struct rocker_neigh_tbl_entry *
}
static void _rocker_neigh_add(struct rocker *rocker,
+ enum switchdev_trans trans,
struct rocker_neigh_tbl_entry *entry)
{
- entry->index = rocker->neigh_tbl_next_index++;
+ entry->index = rocker->neigh_tbl_next_index;
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ return;
+ rocker->neigh_tbl_next_index++;
entry->ref_count++;
hash_add(rocker->neigh_tbl, &entry->entry,
be32_to_cpu(entry->ip_addr));
}
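
_rocker_neigh_add() now assigns the entry the index it would get, then returns before consuming the counter, taking the reference, or hashing the entry when the transaction is only being prepared. The guard in isolation, with stand-in types:

enum switchdev_trans { SWITCHDEV_TRANS_NONE, SWITCHDEV_TRANS_PREPARE };

struct neigh { int index; int ref_count; struct neigh *next; };
struct neigh_tbl { int next_index; struct neigh *head; };

/* Prepare peeks at the index the entry would receive; only a real
 * (non-prepare) call consumes it and publishes the entry. */
static void neigh_add(struct neigh_tbl *t, struct neigh *e,
		      enum switchdev_trans trans)
{
	e->index = t->next_index;	/* same value in both phases */
	if (trans == SWITCHDEV_TRANS_PREPARE)
		return;
	t->next_index++;
	e->ref_count++;
	e->next = t->head;		/* stands in for hash_add() */
	t->head = e;
}
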
-static void _rocker_neigh_del(struct rocker *rocker,
+static void _rocker_neigh_del(enum switchdev_trans trans,
struct rocker_neigh_tbl_entry *entry)
{
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ return;
if (--entry->ref_count == 0) {
hash_del(&entry->entry);
- kfree(entry);
+ rocker_port_kfree(trans, entry);
}
}
-static void _rocker_neigh_update(struct rocker *rocker,
- struct rocker_neigh_tbl_entry *entry,
- u8 *eth_dst, bool ttl_check)
+static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
+ enum switchdev_trans trans,
+ const u8 *eth_dst, bool ttl_check)
{
if (eth_dst) {
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = ttl_check;
- } else {
+ } else if (trans != SWITCHDEV_TRANS_PREPARE) {
entry->ref_count++;
}
}
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
- int flags, __be32 ip_addr, u8 *eth_dst)
+ enum switchdev_trans trans,
+ int flags, __be32 ip_addr, const u8 *eth_dst)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_neigh_tbl_entry *entry;
@@ -2840,7 +2951,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
bool removing;
int err = 0;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2857,12 +2968,12 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
entry->dev = rocker_port->dev;
ether_addr_copy(entry->eth_dst, eth_dst);
entry->ttl_check = true;
- _rocker_neigh_add(rocker, entry);
+ _rocker_neigh_add(rocker, trans, entry);
} else if (removing) {
memcpy(entry, found, sizeof(*entry));
- _rocker_neigh_del(rocker, found);
+ _rocker_neigh_del(trans, found);
} else if (updating) {
- _rocker_neigh_update(rocker, found, eth_dst, true);
+ _rocker_neigh_update(found, trans, eth_dst, true);
memcpy(entry, found, sizeof(*entry));
} else {
err = -ENOENT;
@@ -2879,7 +2990,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
* other routes' nexthops.
*/
- err = rocker_group_l3_unicast(rocker_port, flags,
+ err = rocker_group_l3_unicast(rocker_port, trans, flags,
entry->index,
rocker_port->dev->dev_addr,
entry->eth_dst,
@@ -2895,7 +3006,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
if (adding || removing) {
group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
- err = rocker_flow_tbl_ucast4_routing(rocker_port,
+ err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
eth_type, ip_addr,
inet_make_mask(32),
priority, goto_tbl,
@@ -2909,13 +3020,13 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
err_out:
if (!adding)
- kfree(entry);
+ rocker_port_kfree(trans, entry);
return err;
}
static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
- __be32 ip_addr)
+ enum switchdev_trans trans, __be32 ip_addr)
{
struct net_device *dev = rocker_port->dev;
struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
@@ -2933,7 +3044,8 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
*/
if (n->nud_state & NUD_VALID)
- err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
+ err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
+ ip_addr, n->ha);
else
neigh_event_send(n, NULL);
@@ -2941,7 +3053,8 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
+static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
__be32 ip_addr, u32 *index)
{
struct rocker *rocker = rocker_port->rocker;
@@ -2954,7 +3067,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
bool resolved = true;
int err = 0;
- entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
if (!entry)
return -ENOMEM;
@@ -2971,13 +3084,13 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
if (adding) {
entry->ip_addr = ip_addr;
entry->dev = rocker_port->dev;
- _rocker_neigh_add(rocker, entry);
+ _rocker_neigh_add(rocker, trans, entry);
*index = entry->index;
resolved = false;
} else if (removing) {
- _rocker_neigh_del(rocker, found);
+ _rocker_neigh_del(trans, found);
} else if (updating) {
- _rocker_neigh_update(rocker, found, NULL, false);
+ _rocker_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
} else {
err = -ENOENT;
@@ -2986,7 +3099,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
if (!adding)
- kfree(entry);
+ rocker_port_kfree(trans, entry);
if (err)
return err;
@@ -2994,24 +3107,25 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
/* Resolved means neigh ip_addr is resolved to neigh mac. */
if (!resolved)
- err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
+ err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
return err;
}
static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
int flags, __be16 vlan_id)
{
struct rocker_port *p;
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
u32 *group_ids;
u8 group_count = 0;
int err = 0;
int i;
- group_ids = kcalloc(rocker->port_count, sizeof(u32),
- rocker_op_flags_gfp(flags));
+ group_ids = rocker_port_kcalloc(rocker_port, trans, rocker->port_count,
+ sizeof(u32));
if (!group_ids)
return -ENOMEM;
@@ -3022,6 +3136,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
for (i = 0; i < rocker->port_count; i++) {
p = rocker->ports[i];
+ if (!p)
+ continue;
if (!rocker_port_is_bridged(p))
continue;
if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
@@ -3034,23 +3150,22 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
if (group_count == 0)
goto no_ports_in_vlan;
- err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
- group_count, group_ids,
- group_id);
+ err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
+ group_count, group_ids, group_id);
if (err)
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 flood group\n", err);
no_ports_in_vlan:
- kfree(group_ids);
+ rocker_port_kfree(trans, group_ids);
return err;
}
static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id,
- bool pop_vlan)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id, bool pop_vlan)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct rocker_port *p;
bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
u32 out_pport;
@@ -3065,9 +3180,8 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
if (rocker_port->stp_state == BR_STATE_LEARNING ||
rocker_port->stp_state == BR_STATE_FORWARDING) {
out_pport = rocker_port->pport;
- err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_pport,
- pop_vlan);
+ err = rocker_group_l2_interface(rocker_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 group for pport %d\n",
@@ -3083,7 +3197,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
for (i = 0; i < rocker->port_count; i++) {
p = rocker->ports[i];
- if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
+ if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
ref++;
}
@@ -3091,9 +3205,8 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
return 0;
out_pport = 0;
- err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_pport,
- pop_vlan);
+ err = rocker_group_l2_interface(rocker_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 group for CPU port\n", err);
@@ -3149,14 +3262,14 @@ static struct rocker_ctrl {
};
static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
- int flags, struct rocker_ctrl *ctrl,
- __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport = rocker_port->pport;
u32 in_pport_mask = 0xffffffff;
u32 out_pport = 0;
- u8 *eth_src = NULL;
- u8 *eth_src_mask = NULL;
+ const u8 *eth_src = NULL;
+ const u8 *eth_src_mask = NULL;
__be16 vlan_id_mask = htons(0xffff);
u8 ip_proto = 0;
u8 ip_proto_mask = 0;
@@ -3165,7 +3278,7 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
int err;
- err = rocker_flow_tbl_acl(rocker_port, flags,
+ err = rocker_flow_tbl_acl(rocker_port, trans, flags,
in_pport, in_pport_mask,
eth_src, eth_src_mask,
ctrl->eth_dst, ctrl->eth_dst_mask,
@@ -3182,7 +3295,8 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
- int flags, struct rocker_ctrl *ctrl,
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl,
__be16 vlan_id)
{
enum rocker_of_dpa_table_id goto_tbl =
@@ -3194,7 +3308,7 @@ static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
if (!rocker_port_is_bridged(rocker_port))
return 0;
- err = rocker_flow_tbl_bridge(rocker_port, flags,
+ err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
ctrl->eth_dst, ctrl->eth_dst_mask,
vlan_id, tunnel_id,
goto_tbl, group_id, ctrl->copy_to_cpu);
@@ -3206,8 +3320,8 @@ static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
}
static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
- int flags, struct rocker_ctrl *ctrl,
- __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
__be16 vlan_id_mask = htons(0xffff);
@@ -3216,7 +3330,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
if (ntohs(vlan_id) == 0)
vlan_id = rocker_port->internal_vlan_id;
- err = rocker_flow_tbl_term_mac(rocker_port,
+ err = rocker_flow_tbl_term_mac(rocker_port, trans,
rocker_port->pport, in_pport_mask,
ctrl->eth_type, ctrl->eth_dst,
ctrl->eth_dst_mask, vlan_id,
@@ -3229,32 +3343,34 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
- struct rocker_ctrl *ctrl, __be16 vlan_id)
+static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
if (ctrl->acl)
- return rocker_port_ctrl_vlan_acl(rocker_port, flags,
+ return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
ctrl, vlan_id);
if (ctrl->bridge)
- return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
+ return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
ctrl, vlan_id);
if (ctrl->term)
- return rocker_port_ctrl_vlan_term(rocker_port, flags,
+ return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
ctrl, vlan_id);
return -EOPNOTSUPP;
}
static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id)
{
int err = 0;
int i;
for (i = 0; i < ROCKER_CTRL_MAX; i++) {
if (rocker_port->ctrls[i]) {
- err = rocker_port_ctrl_vlan(rocker_port, flags,
+ err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
&rocker_ctrls[i], vlan_id);
if (err)
return err;
@@ -3264,8 +3380,9 @@ static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
- struct rocker_ctrl *ctrl)
+static int rocker_port_ctrl(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags,
+ const struct rocker_ctrl *ctrl)
{
u16 vid;
int err = 0;
@@ -3273,7 +3390,7 @@ static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
for (vid = 1; vid < VLAN_N_VID; vid++) {
if (!test_bit(vid, rocker_port->vlan_bitmap))
continue;
- err = rocker_port_ctrl_vlan(rocker_port, flags,
+ err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
ctrl, htons(vid));
if (err)
break;
@@ -3282,8 +3399,8 @@ static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
return err;
}
-static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
- u16 vid)
+static int rocker_port_vlan(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags, u16 vid)
{
enum rocker_of_dpa_table_id goto_tbl =
ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
@@ -3297,50 +3414,57 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
- if (adding && test_and_set_bit(ntohs(internal_vlan_id),
- rocker_port->vlan_bitmap))
+ if (adding && test_bit(ntohs(internal_vlan_id),
+ rocker_port->vlan_bitmap))
return 0; /* already added */
- else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
- rocker_port->vlan_bitmap))
+ else if (!adding && !test_bit(ntohs(internal_vlan_id),
+ rocker_port->vlan_bitmap))
return 0; /* already removed */
+ change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
+
if (adding) {
- err = rocker_port_ctrl_vlan_add(rocker_port, flags,
+ err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
internal_vlan_id);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port ctrl vlan add\n", err);
- return err;
+ goto err_out;
}
}
- err = rocker_port_vlan_l2_groups(rocker_port, flags,
+ err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
internal_vlan_id, untagged);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 groups\n", err);
- return err;
+ goto err_out;
}
- err = rocker_port_vlan_flood_group(rocker_port, flags,
+ err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
internal_vlan_id);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 flood group\n", err);
- return err;
+ goto err_out;
}
- err = rocker_flow_tbl_vlan(rocker_port, flags,
+ err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
in_pport, vlan_id, vlan_id_mask,
goto_tbl, untagged, internal_vlan_id);
if (err)
netdev_err(rocker_port->dev,
"Error (%d) port VLAN table\n", err);
+err_out:
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
+
return err;
}
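
rocker_port_vlan() previously used test_and_set_bit()/test_and_clear_bit(), which mutate the bitmap even on a dry run. It now tests first, flips with change_bit(), and on the PREPARE path flips the bit back at err_out, so a prepared-then-aborted operation leaves the bitmap exactly as it found it. A compact model of that toggle-and-restore idiom (single-word bitmap for brevity):

#include <stdbool.h>

enum switchdev_trans { SWITCHDEV_TRANS_NONE, SWITCHDEV_TRANS_PREPARE };

static bool test_bit(const unsigned long *map, int n)
{
	return *map & (1UL << n);
}

static void change_bit(unsigned long *map, int n)
{
	*map ^= 1UL << n;
}

static int port_vlan(unsigned long *bitmap, int vid, bool adding,
		     enum switchdev_trans trans)
{
	int err = 0;

	if (adding == test_bit(bitmap, vid))
		return 0;		/* already added / already removed */

	change_bit(bitmap, vid);	/* tentatively flip */

	/* ... program ctrl/group/flow tables here; any failure sets
	 * err and falls through to the restore below ... */

	if (trans == SWITCHDEV_TRANS_PREPARE)
		change_bit(bitmap, vid);	/* dry run leaves no trace */
	return err;
}
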
-static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
+static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, int flags)
{
enum rocker_of_dpa_table_id goto_tbl;
u32 in_pport;
@@ -3355,7 +3479,7 @@ static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
in_pport_mask = 0xffff0000;
goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
- err = rocker_flow_tbl_ig_port(rocker_port, flags,
+ err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
in_pport, in_pport_mask,
goto_tbl);
if (err)
@@ -3367,7 +3491,8 @@ static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
struct rocker_fdb_learn_work {
struct work_struct work;
- struct net_device *dev;
+ struct rocker_port *rocker_port;
+ enum switchdev_trans trans;
int flags;
u8 addr[ETH_ALEN];
u16 vid;
@@ -3375,27 +3500,28 @@ struct rocker_fdb_learn_work {
static void rocker_port_fdb_learn_work(struct work_struct *work)
{
- struct rocker_fdb_learn_work *lw =
+ const struct rocker_fdb_learn_work *lw =
container_of(work, struct rocker_fdb_learn_work, work);
bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
- struct netdev_switch_notifier_fdb_info info;
+ struct switchdev_notifier_fdb_info info;
info.addr = lw->addr;
info.vid = lw->vid;
if (learned && removing)
- call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
- lw->dev, &info.info);
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+ lw->rocker_port->dev, &info.info);
else if (learned && !removing)
- call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
- lw->dev, &info.info);
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+ lw->rocker_port->dev, &info.info);
- kfree(work);
+ rocker_port_kfree(lw->trans, work);
}
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
- int flags, const u8 *addr, __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ const u8 *addr, __be16 vlan_id)
{
struct rocker_fdb_learn_work *lw;
enum rocker_of_dpa_table_id goto_tbl =
@@ -3411,8 +3537,8 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
- err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
- vlan_id, tunnel_id, goto_tbl,
+ err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
+ NULL, vlan_id, tunnel_id, goto_tbl,
group_id, copy_to_cpu);
if (err)
return err;
@@ -3424,24 +3550,29 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
if (!rocker_port_is_bridged(rocker_port))
return 0;
- lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
+ lw = rocker_port_kzalloc(rocker_port, trans, sizeof(*lw));
if (!lw)
return -ENOMEM;
INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
- lw->dev = rocker_port->dev;
+ lw->rocker_port = rocker_port;
+ lw->trans = trans;
lw->flags = flags;
ether_addr_copy(lw->addr, addr);
lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
- schedule_work(&lw->work);
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ rocker_port_kfree(trans, lw);
+ else
+ schedule_work(&lw->work);
return 0;
}
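
The learn-work item now carries the rocker_port and the transaction instead of a bare net_device, and the netdev_switch notifiers become switchdev ones. During PREPARE the fully built work item is freed instead of scheduled, so the FDB notifier chain only fires for committed (or non-transactional) operations. A stand-alone model of that gate, with a direct call standing in for schedule_work():

#include <stdio.h>
#include <stdlib.h>

enum switchdev_trans { SWITCHDEV_TRANS_NONE, SWITCHDEV_TRANS_PREPARE };

struct learn_work { void (*fn)(struct learn_work *); int vid; };

static void learn_notify(struct learn_work *lw)
{
	printf("notify bridge: vid %d learned\n", lw->vid);  /* FDB_ADD */
	free(lw);
}

static int fdb_learn(int vid, enum switchdev_trans trans)
{
	struct learn_work *lw = calloc(1, sizeof(*lw));

	if (!lw)
		return -1;
	lw->fn = learn_notify;
	lw->vid = vid;
	if (trans == SWITCHDEV_TRANS_PREPARE) {
		free(lw);		/* dry run: build but never notify */
		return 0;
	}
	lw->fn(lw);			/* stands in for schedule_work() */
	return 0;
}
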
static struct rocker_fdb_tbl_entry *
-rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
+rocker_fdb_tbl_find(const struct rocker *rocker,
+ const struct rocker_fdb_tbl_entry *match)
{
struct rocker_fdb_tbl_entry *found;
@@ -3453,6 +3584,7 @@ rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
}
static int rocker_port_fdb(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
const unsigned char *addr,
__be16 vlan_id, int flags)
{
@@ -3462,7 +3594,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
unsigned long lock_flags;
- fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
+ fdb = rocker_port_kzalloc(rocker_port, trans, sizeof(*fdb));
if (!fdb)
return -ENOMEM;
@@ -3477,32 +3609,35 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
found = rocker_fdb_tbl_find(rocker, fdb);
if (removing && found) {
- kfree(fdb);
- hash_del(&found->entry);
+ rocker_port_kfree(trans, fdb);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
} else if (!removing && !found) {
- hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
}
spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
/* Check if adding and already exists, or removing and can't find */
if (!found != !removing) {
- kfree(fdb);
+ rocker_port_kfree(trans, fdb);
if (!found && removing)
return 0;
/* Refreshing existing to update aging timers */
flags |= ROCKER_OP_FLAG_REFRESH;
}
- return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
+ return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
}
-static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
+static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_fdb_tbl_entry *found;
unsigned long lock_flags;
- int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
+ int flags = ROCKER_OP_FLAG_REMOVE;
struct hlist_node *tmp;
int bkt;
int err = 0;
@@ -3518,12 +3653,13 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
continue;
if (!found->learned)
continue;
- err = rocker_port_fdb_learn(rocker_port, flags,
+ err = rocker_port_fdb_learn(rocker_port, trans, flags,
found->key.addr,
found->key.vlan_id);
if (err)
goto err_out;
- hash_del(&found->entry);
+ if (trans != SWITCHDEV_TRANS_PREPARE)
+ hash_del(&found->entry);
}
err_out:
@@ -3533,7 +3669,8 @@ err_out:
}
static int rocker_port_router_mac(struct rocker_port *rocker_port,
- int flags, __be16 vlan_id)
+ enum switchdev_trans trans, int flags,
+ __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
__be16 eth_type;
@@ -3546,7 +3683,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
vlan_id = rocker_port->internal_vlan_id;
eth_type = htons(ETH_P_IP);
- err = rocker_flow_tbl_term_mac(rocker_port,
+ err = rocker_flow_tbl_term_mac(rocker_port, trans,
rocker_port->pport, in_pport_mask,
eth_type, rocker_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
@@ -3555,7 +3692,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
return err;
eth_type = htons(ETH_P_IPV6);
- err = rocker_flow_tbl_term_mac(rocker_port,
+ err = rocker_flow_tbl_term_mac(rocker_port, trans,
rocker_port->pport, in_pport_mask,
eth_type, rocker_port->dev->dev_addr,
dst_mac_mask, vlan_id, vlan_id_mask,
@@ -3564,13 +3701,14 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
return err;
}
-static int rocker_port_fwding(struct rocker_port *rocker_port)
+static int rocker_port_fwding(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
bool pop_vlan;
u32 out_pport;
__be16 vlan_id;
u16 vid;
- int flags = ROCKER_OP_FLAG_NOWAIT;
+ int flags = 0;
int err;
/* Port will be forwarding-enabled if its STP state is LEARNING
@@ -3590,9 +3728,8 @@ static int rocker_port_fwding(struct rocker_port *rocker_port)
continue;
vlan_id = htons(vid);
pop_vlan = rocker_vlan_id_is_internal(vlan_id);
- err = rocker_group_l2_interface(rocker_port, flags,
- vlan_id, out_pport,
- pop_vlan);
+ err = rocker_group_l2_interface(rocker_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
if (err) {
netdev_err(rocker_port->dev,
"Error (%d) port VLAN l2 group for pport %d\n",
@@ -3604,13 +3741,21 @@ static int rocker_port_fwding(struct rocker_port *rocker_port)
return 0;
}
-static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
+static int rocker_port_stp_update(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, u8 state)
{
bool want[ROCKER_CTRL_MAX] = { 0, };
+ bool prev_ctrls[ROCKER_CTRL_MAX];
+ u8 prev_state;
int flags;
int err;
int i;
+ if (trans == SWITCHDEV_TRANS_PREPARE) {
+ memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
+ prev_state = rocker_port->stp_state;
+ }
+
if (rocker_port->stp_state == state)
return 0;
@@ -3638,45 +3783,54 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
for (i = 0; i < ROCKER_CTRL_MAX; i++) {
if (want[i] != rocker_port->ctrls[i]) {
- flags = ROCKER_OP_FLAG_NOWAIT |
- (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
- err = rocker_port_ctrl(rocker_port, flags,
+ flags = (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
+ err = rocker_port_ctrl(rocker_port, trans, flags,
&rocker_ctrls[i]);
if (err)
- return err;
+ goto err_out;
rocker_port->ctrls[i] = want[i];
}
}
- err = rocker_port_fdb_flush(rocker_port);
+ err = rocker_port_fdb_flush(rocker_port, trans);
if (err)
- return err;
+ goto err_out;
+
+ err = rocker_port_fwding(rocker_port, trans);
- return rocker_port_fwding(rocker_port);
+err_out:
+ if (trans == SWITCHDEV_TRANS_PREPARE) {
+ memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
+ rocker_port->stp_state = prev_state;
+ }
+
+ return err;
}
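
rocker_port_stp_update() has to mutate rocker_port->ctrls[] and stp_state as it walks the wanted-controls table, so a prepare pass snapshots both up front and restores them at err_out; only a committed call keeps the new state. The save/restore skeleton, reduced to stand-in types:

#include <stdbool.h>
#include <string.h>

enum switchdev_trans { SWITCHDEV_TRANS_NONE, SWITCHDEV_TRANS_PREPARE };

#define CTRL_MAX 4

struct port { bool ctrls[CTRL_MAX]; unsigned char stp_state; };

static int stp_update(struct port *p, enum switchdev_trans trans,
		      unsigned char state)
{
	bool prev_ctrls[CTRL_MAX];
	unsigned char prev_state = p->stp_state;
	int err = 0;

	if (trans == SWITCHDEV_TRANS_PREPARE)
		memcpy(prev_ctrls, p->ctrls, sizeof(prev_ctrls));

	p->stp_state = state;
	/* ... update ctrls[], flush the FDB, reprogram forwarding;
	 * failures set err and fall through ... */

	if (trans == SWITCHDEV_TRANS_PREPARE) {	/* undo the dry run */
		memcpy(p->ctrls, prev_ctrls, sizeof(prev_ctrls));
		p->stp_state = prev_state;
	}
	return err;
}
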
-static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
+static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will enable port */
return 0;
/* port is not bridged, so simulate going to FORWARDING state */
- return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
+ return rocker_port_stp_update(rocker_port, trans, BR_STATE_FORWARDING);
}
-static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
+static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
+ enum switchdev_trans trans)
{
if (rocker_port_is_bridged(rocker_port))
/* bridge STP will disable port */
return 0;
/* port is not bridged, so simulate going to DISABLED state */
- return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+ return rocker_port_stp_update(rocker_port, trans, BR_STATE_DISABLED);
}
static struct rocker_internal_vlan_tbl_entry *
-rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
+rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
{
struct rocker_internal_vlan_tbl_entry *found;
@@ -3731,8 +3885,9 @@ found:
return found->vlan_id;
}
-static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
- int ifindex)
+static void
+rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
+ int ifindex)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_internal_vlan_tbl_entry *found;
@@ -3760,11 +3915,12 @@ not_found:
spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}
-static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
- int dst_len, struct fib_info *fi, u32 tb_id,
- int flags)
+static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, __be32 dst,
+ int dst_len, const struct fib_info *fi,
+ u32 tb_id, int flags)
{
- struct fib_nh *nh;
+ const struct fib_nh *nh;
__be16 eth_type = htons(ETH_P_IP);
__be32 dst_mask = inet_make_mask(dst_len);
__be16 internal_vlan_id = rocker_port->internal_vlan_id;
@@ -3784,7 +3940,7 @@ static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
has_gw = !!nh->nh_gw;
if (has_gw && nh_on_port) {
- err = rocker_port_ipv4_nh(rocker_port, flags,
+ err = rocker_port_ipv4_nh(rocker_port, trans, flags,
nh->nh_gw, &index);
if (err)
return err;
@@ -3795,7 +3951,7 @@ static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
}
- err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
+ err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
dst_mask, priority, goto_tbl,
group_id, flags);
if (err)
@@ -3834,7 +3990,7 @@ static int rocker_port_open(struct net_device *dev)
goto err_request_rx_irq;
}
- err = rocker_port_fwd_enable(rocker_port);
+ err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE);
if (err)
goto err_fwd_enable;
@@ -3861,7 +4017,7 @@ static int rocker_port_stop(struct net_device *dev)
rocker_port_set_enable(rocker_port, false);
napi_disable(&rocker_port->napi_rx);
napi_disable(&rocker_port->napi_tx);
- rocker_port_fwd_disable(rocker_port);
+ rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE);
free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
rocker_port_dma_rings_fini(rocker_port);
@@ -3869,12 +4025,12 @@ static int rocker_port_stop(struct net_device *dev)
return 0;
}
-static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info)
+static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
- struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
struct rocker_tlv *attr;
int rem;
@@ -3882,7 +4038,7 @@ static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
if (!attrs[ROCKER_TLV_TX_FRAGS])
return;
rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
- struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
+ const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
dma_addr_t dma_handle;
size_t len;
@@ -3899,11 +4055,11 @@ static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
}
}
-static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
+static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
char *buf, size_t buf_len)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
struct rocker_tlv *frag;
@@ -4008,269 +4164,333 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
- __be16 proto, u16 vid)
+static int rocker_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
{
struct rocker_port *rocker_port = netdev_priv(dev);
+ struct port_name name = { .buf = buf, .len = len };
int err;
- err = rocker_port_vlan(rocker_port, 0, vid);
- if (err)
- return err;
+ err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_phys_name_proc,
+ &name);
- return rocker_port_router_mac(rocker_port, 0, htons(vid));
+ return err ? -EOPNOTSUPP : 0;
}
-static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
- __be16 proto, u16 vid)
+static const struct net_device_ops rocker_port_netdev_ops = {
+ .ndo_open = rocker_port_open,
+ .ndo_stop = rocker_port_stop,
+ .ndo_start_xmit = rocker_port_xmit,
+ .ndo_set_mac_address = rocker_port_set_mac_address,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
+ .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
+};
+
+/********************
+ * swdev interface
+ ********************/
+
+static int rocker_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int err;
+ const struct rocker_port *rocker_port = netdev_priv(dev);
+ const struct rocker *rocker = rocker_port->rocker;
- err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
- htons(vid));
- if (err)
- return err;
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(rocker->hw.id);
+ memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
+ break;
+ case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+ attr->u.brport_flags = rocker_port->brport_flags;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
+ return 0;
}
-static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid,
- u16 nlm_flags)
+static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
- int flags = 0;
+ struct list_head *mem, *tmp;
- if (!rocker_port_is_bridged(rocker_port))
- return -EINVAL;
-
- return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+ list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
+ list_del(mem);
+ kfree(mem);
+ }
}
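
rocker_port_trans_abort() is the other half of the allocator modeled earlier: everything rocker_port_kzalloc() queued on trans_mem during PREPARE is unlinked and freed in one sweep when the switchdev core aborts. A minimal model matching that earlier sketch (illustrative names):

#include <stdlib.h>

struct trans_mem { struct trans_mem *next; };
struct rocker_port { struct trans_mem *trans_mem; };

/* Abort: reclaim every block staged during the prepare phase. */
static void port_trans_abort(struct rocker_port *port)
{
	struct trans_mem *m, *next;

	for (m = port->trans_mem; m; m = next) {
		next = m->next;
		free(m);
	}
	port->trans_mem = NULL;
}
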
-static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 vid)
+static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ unsigned long brport_flags)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
- int flags = ROCKER_OP_FLAG_REMOVE;
+ unsigned long orig_flags;
+ int err = 0;
- if (!rocker_port_is_bridged(rocker_port))
- return -EINVAL;
+ orig_flags = rocker_port->brport_flags;
+ rocker_port->brport_flags = brport_flags;
+ if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
+ err = rocker_port_set_learning(rocker_port, trans);
- return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+ if (trans == SWITCHDEV_TRANS_PREPARE)
+ rocker_port->brport_flags = orig_flags;
+
+ return err;
}
-static int rocker_fdb_fill_info(struct sk_buff *skb,
- struct rocker_port *rocker_port,
- const unsigned char *addr, u16 vid,
- u32 portid, u32 seq, int type,
- unsigned int flags)
+static int rocker_port_attr_set(struct net_device *dev,
+ struct switchdev_attr *attr)
{
- struct nlmsghdr *nlh;
- struct ndmsg *ndm;
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err = 0;
- nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
- if (!nlh)
- return -EMSGSIZE;
+ switch (attr->trans) {
+ case SWITCHDEV_TRANS_PREPARE:
+ BUG_ON(!list_empty(&rocker_port->trans_mem));
+ break;
+ case SWITCHDEV_TRANS_ABORT:
+ rocker_port_trans_abort(rocker_port);
+ return 0;
+ default:
+ break;
+ }
- ndm = nlmsg_data(nlh);
- ndm->ndm_family = AF_BRIDGE;
- ndm->ndm_pad1 = 0;
- ndm->ndm_pad2 = 0;
- ndm->ndm_flags = NTF_SELF;
- ndm->ndm_type = 0;
- ndm->ndm_ifindex = rocker_port->dev->ifindex;
- ndm->ndm_state = NUD_REACHABLE;
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_PORT_STP_STATE:
+ err = rocker_port_stp_update(rocker_port, attr->trans,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+ err = rocker_port_brport_flags_set(rocker_port, attr->trans,
+ attr->u.brport_flags);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
- if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
- goto nla_put_failure;
+ return err;
+}
- if (vid && nla_put_u16(skb, NDA_VLAN, vid))
- goto nla_put_failure;
+static int rocker_port_vlan_add(struct rocker_port *rocker_port,
+ enum switchdev_trans trans, u16 vid, u16 flags)
+{
+ int err;
- nlmsg_end(skb, nlh);
- return 0;
+ /* XXX deal with flags for PVID and untagged */
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
+ err = rocker_port_vlan(rocker_port, trans, 0, vid);
+ if (err)
+ return err;
+
+ err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
+ if (err)
+ rocker_port_vlan(rocker_port, trans,
+ ROCKER_OP_FLAG_REMOVE, vid);
+
+ return err;
}
-static int rocker_port_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev,
- struct net_device *filter_dev,
- int idx)
+static int rocker_port_vlans_add(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ const struct switchdev_obj_vlan *vlan)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_fdb_tbl_entry *found;
- struct hlist_node *tmp;
- int bkt;
- unsigned long lock_flags;
- const unsigned char *addr;
u16 vid;
int err;
- spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
- hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.pport != rocker_port->pport)
- continue;
- if (idx < cb->args[0])
- goto skip;
- addr = found->key.addr;
- vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
- err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH, NLM_F_MULTI);
- if (err < 0)
- break;
-skip:
- ++idx;
+ for (vid = vlan->vid_start; vid <= vlan->vid_end; vid++) {
+ err = rocker_port_vlan_add(rocker_port, trans,
+ vid, vlan->flags);
+ if (err)
+ return err;
}
- spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
- return idx;
+
+ return 0;
}
-static int rocker_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags)
+static int rocker_port_fdb_add(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ const struct switchdev_obj_fdb *fdb)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct nlattr *protinfo;
- struct nlattr *attr;
- int err;
+ __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
+ int flags = 0;
- protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
- IFLA_PROTINFO);
- if (protinfo) {
- attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
- if (attr) {
- if (nla_len(attr) < sizeof(u8))
- return -EINVAL;
-
- if (nla_get_u8(attr))
- rocker_port->brport_flags |= BR_LEARNING;
- else
- rocker_port->brport_flags &= ~BR_LEARNING;
- err = rocker_port_set_learning(rocker_port);
- if (err)
- return err;
- }
- attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
- if (attr) {
- if (nla_len(attr) < sizeof(u8))
- return -EINVAL;
-
- if (nla_get_u8(attr))
- rocker_port->brport_flags |= BR_LEARNING_SYNC;
- else
- rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
- }
- }
+ if (!rocker_port_is_bridged(rocker_port))
+ return -EINVAL;
- return 0;
+ return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}
-static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev,
- u32 filter_mask, int nlflags)
+static int rocker_port_obj_add(struct net_device *dev,
+ struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
- u16 mode = BRIDGE_MODE_UNDEF;
- u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+ const struct switchdev_obj_ipv4_fib *fib4;
+ int err = 0;
+
+ switch (obj->trans) {
+ case SWITCHDEV_TRANS_PREPARE:
+ BUG_ON(!list_empty(&rocker_port->trans_mem));
+ break;
+ case SWITCHDEV_TRANS_ABORT:
+ rocker_port_trans_abort(rocker_port);
+ return 0;
+ default:
+ break;
+ }
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_VLAN:
+ err = rocker_port_vlans_add(rocker_port, obj->trans,
+ &obj->u.vlan);
+ break;
+ case SWITCHDEV_OBJ_IPV4_FIB:
+ fib4 = &obj->u.ipv4_fib;
+ err = rocker_port_fib_ipv4(rocker_port, obj->trans,
+ htonl(fib4->dst), fib4->dst_len,
+ fib4->fi, fib4->tb_id, 0);
+ break;
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
- rocker_port->brport_flags, mask,
- nlflags);
+ return err;
}
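
rocker_port_obj_add() dispatches on obj->trans: PREPARE must leave nothing behind except memory queued on rocker_port->trans_mem, ABORT frees that memory via rocker_port_trans_abort(), and the actual hardware write happens on the later pass (only PREPARE, ABORT and the default case appear in the hunk above). A standalone simulation of this prepare/commit/abort convention, with stand-in names:

	#include <stdio.h>
	#include <stdlib.h>

	/* Standalone simulation of the two-phase pattern used by
	 * rocker_port_obj_add(); all names here are stand-ins. */
	enum trans { TRANS_PREPARE, TRANS_COMMIT, TRANS_ABORT };

	struct port {
		void *prealloc;   /* plays the role of rocker_port->trans_mem */
	};

	static int obj_add(struct port *p, enum trans t)
	{
		switch (t) {
		case TRANS_PREPARE:
			/* Allocate everything up front; touch no "hardware". */
			p->prealloc = malloc(64);
			return p->prealloc ? 0 : -1;
		case TRANS_ABORT:
			/* Prepare failed elsewhere: free what we queued. */
			free(p->prealloc);
			p->prealloc = NULL;
			return 0;
		case TRANS_COMMIT:
			/* Commit consumes the preallocated memory, so it
			 * cannot fail for lack of memory. */
			printf("commit using %p\n", p->prealloc);
			free(p->prealloc);
			p->prealloc = NULL;
			return 0;
		}
		return -1;
	}

	int main(void)
	{
		struct port p = { NULL };

		if (obj_add(&p, TRANS_PREPARE)) {
			obj_add(&p, TRANS_ABORT);
			return 1;
		}
		return obj_add(&p, TRANS_COMMIT);
	}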
-static int rocker_port_get_phys_port_name(struct net_device *dev,
- char *buf, size_t len)
+static int rocker_port_vlan_del(struct rocker_port *rocker_port,
+ u16 vid, u16 flags)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct port_name name = { .buf = buf, .len = len };
int err;
- err = rocker_cmd_exec(rocker_port->rocker, rocker_port,
- rocker_cmd_get_port_settings_prep, NULL,
- rocker_cmd_get_port_settings_phys_name_proc,
- &name, false);
+ err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE, htons(vid));
+ if (err)
+ return err;
- return err ? -EOPNOTSUPP : 0;
+ return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE, vid);
}
-static const struct net_device_ops rocker_port_netdev_ops = {
- .ndo_open = rocker_port_open,
- .ndo_stop = rocker_port_stop,
- .ndo_start_xmit = rocker_port_xmit,
- .ndo_set_mac_address = rocker_port_set_mac_address,
- .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid,
- .ndo_fdb_add = rocker_port_fdb_add,
- .ndo_fdb_del = rocker_port_fdb_del,
- .ndo_fdb_dump = rocker_port_fdb_dump,
- .ndo_bridge_setlink = rocker_port_bridge_setlink,
- .ndo_bridge_getlink = rocker_port_bridge_getlink,
- .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
-};
+static int rocker_port_vlans_del(struct rocker_port *rocker_port,
+ const struct switchdev_obj_vlan *vlan)
+{
+ u16 vid;
+ int err;
-/********************
- * swdev interface
- ********************/
+ for (vid = vlan->vid_start; vid <= vlan->vid_end; vid++) {
+ err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
-static int rocker_port_swdev_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid)
+static int rocker_port_fdb_del(struct rocker_port *rocker_port,
+ enum switchdev_trans trans,
+ const struct switchdev_obj_fdb *fdb)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct rocker *rocker = rocker_port->rocker;
+ __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
+ int flags = ROCKER_OP_FLAG_REMOVE;
- psid->id_len = sizeof(rocker->hw.id);
- memcpy(&psid->id, &rocker->hw.id, psid->id_len);
- return 0;
+ if (!rocker_port_is_bridged(rocker_port))
+ return -EINVAL;
+
+ return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
}
-static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state)
+static int rocker_port_obj_del(struct net_device *dev,
+ struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
+ const struct switchdev_obj_ipv4_fib *fib4;
+ int err = 0;
- return rocker_port_stp_update(rocker_port, state);
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_VLAN:
+ err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
+ break;
+ case SWITCHDEV_OBJ_IPV4_FIB:
+ fib4 = &obj->u.ipv4_fib;
+ err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
+ htonl(fib4->dst), fib4->dst_len,
+ fib4->fi, fib4->tb_id,
+ ROCKER_OP_FLAG_REMOVE);
+ break;
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
}
-static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev,
- __be32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type,
- u32 nlflags, u32 tb_id)
+static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
+ struct switchdev_obj *obj)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int flags = 0;
+ struct rocker *rocker = rocker_port->rocker;
+ struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+ struct rocker_fdb_tbl_entry *found;
+ struct hlist_node *tmp;
+ unsigned long lock_flags;
+ int bkt;
+ int err = 0;
+
+ spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+ hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
+ if (found->key.pport != rocker_port->pport)
+ continue;
+ fdb->addr = found->key.addr;
+ fdb->vid = rocker_port_vlan_to_vid(rocker_port,
+ found->key.vlan_id);
+ err = obj->cb(rocker_port->dev, obj);
+ if (err)
+ break;
+ }
+ spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
- return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
- fi, tb_id, flags);
+ return err;
}
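
The old ndo_fdb_dump encoded netlink attributes into an skb itself; the switchdev version instead fills one shared object per matching table entry and invokes obj->cb, leaving the encoding to the caller. A standalone sketch of that visitor shape, with stand-in types:

	#include <stdio.h>

	/* Standalone sketch of the visitor-style dump used by
	 * rocker_port_fdb_dump() above; types are stand-ins. */
	struct fdb_obj {
		const unsigned char *addr;
		unsigned short vid;
		int (*cb)(const struct fdb_obj *obj);   /* plays obj->cb */
	};

	static int print_cb(const struct fdb_obj *obj)
	{
		printf("fdb %02x:...:%02x vid %u\n", obj->addr[0],
		       obj->addr[5], obj->vid);
		return 0;   /* non-zero stops the walk, as in the driver */
	}

	static int fdb_dump(struct fdb_obj *obj)
	{
		static const unsigned char macs[2][6] = {
			{ 0x52, 0x54, 0, 0, 0, 1 },
			{ 0x52, 0x54, 0, 0, 0, 2 },
		};
		int i, err = 0;

		for (i = 0; i < 2; i++) {
			obj->addr = macs[i];
			obj->vid = 100 + i;
			err = obj->cb(obj);
			if (err)
				break;
		}
		return err;
	}

	int main(void)
	{
		struct fdb_obj obj = { .cb = print_cb };
		return fdb_dump(&obj);
	}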
-static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev,
- __be32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id)
+static int rocker_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int flags = ROCKER_OP_FLAG_REMOVE;
+ const struct rocker_port *rocker_port = netdev_priv(dev);
+ int err = 0;
- return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
- fi, tb_id, flags);
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_PORT_FDB:
+ err = rocker_port_fdb_dump(rocker_port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
}
-static const struct swdev_ops rocker_port_swdev_ops = {
- .swdev_parent_id_get = rocker_port_swdev_parent_id_get,
- .swdev_port_stp_update = rocker_port_swdev_port_stp_update,
- .swdev_fib_ipv4_add = rocker_port_swdev_fib_ipv4_add,
- .swdev_fib_ipv4_del = rocker_port_swdev_fib_ipv4_del,
+static const struct switchdev_ops rocker_port_switchdev_ops = {
+ .switchdev_port_attr_get = rocker_port_attr_get,
+ .switchdev_port_attr_set = rocker_port_attr_set,
+ .switchdev_port_obj_add = rocker_port_obj_add,
+ .switchdev_port_obj_del = rocker_port_obj_del,
+ .switchdev_port_obj_dump = rocker_port_obj_dump,
};
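
The per-object handlers are wired up through a single const ops table shared by every port, the same pattern the removed swdev_ops used. A trivial standalone restatement of that pattern, with stand-in names:

	#include <stdio.h>

	/* One static const table of function pointers, attached by
	 * pointer, as with dev->switchdev_ops above. Names are stand-ins. */
	struct ops {
		int (*obj_add)(int id);
		int (*obj_del)(int id);
	};

	static int my_add(int id) { printf("add %d\n", id); return 0; }
	static int my_del(int id) { printf("del %d\n", id); return 0; }

	static const struct ops port_ops = {
		.obj_add = my_add,
		.obj_del = my_del,
	};

	int main(void)
	{
		const struct ops *ops = &port_ops;

		ops->obj_add(1);
		return ops->obj_del(1);
	}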
/********************
@@ -4334,8 +4554,7 @@ static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
}
static int
-rocker_cmd_get_port_stats_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -4359,14 +4578,13 @@ rocker_cmd_get_port_stats_prep(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
- struct rocker_tlv *pattr;
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
+ const struct rocker_tlv *pattr;
u32 pport;
u64 *data = priv;
int i;
@@ -4400,10 +4618,10 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
void *priv)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_get_port_stats_prep, NULL,
rocker_cmd_get_port_stats_ethtool_proc,
- priv, false);
+ priv);
}
static void rocker_port_get_stats(struct net_device *dev,
@@ -4417,8 +4635,6 @@ static void rocker_port_get_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
data[i] = 0;
}
-
- return;
}
static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
@@ -4453,8 +4669,8 @@ static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_desc_info *desc_info;
+ const struct rocker *rocker = rocker_port->rocker;
+ const struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -4472,8 +4688,9 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
if (err == 0) {
rocker_port->dev->stats.tx_packets++;
rocker_port->dev->stats.tx_bytes += skb->len;
- } else
+ } else {
rocker_port->dev->stats.tx_errors++;
+ }
dev_kfree_skb_any(skb);
credits++;
@@ -4488,11 +4705,11 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
-static int rocker_port_rx_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_port_rx_proc(const struct rocker *rocker,
+ const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
size_t rx_len;
@@ -4514,7 +4731,7 @@ static int rocker_port_rx_proc(struct rocker *rocker,
netif_receive_skb(skb);
- return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
+ return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}
static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
@@ -4525,7 +4742,7 @@ static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -4565,9 +4782,9 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
* PCI driver ops
*****************/
-static void rocker_carrier_init(struct rocker_port *rocker_port)
+static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
bool link_up;
@@ -4578,23 +4795,24 @@ static void rocker_carrier_init(struct rocker_port *rocker_port)
netif_carrier_off(rocker_port->dev);
}
-static void rocker_remove_ports(struct rocker *rocker)
+static void rocker_remove_ports(const struct rocker *rocker)
{
struct rocker_port *rocker_port;
int i;
for (i = 0; i < rocker->port_count; i++) {
rocker_port = rocker->ports[i];
- rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
+ rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE);
unregister_netdev(rocker_port->dev);
}
kfree(rocker->ports);
}
-static void rocker_port_dev_addr_init(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct rocker *rocker = rocker_port->rocker;
+ const struct pci_dev *pdev = rocker->pdev;
int err;
err = rocker_cmd_get_port_settings_macaddr(rocker_port,
@@ -4607,9 +4825,10 @@ static void rocker_port_dev_addr_init(struct rocker *rocker,
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
struct rocker_port *rocker_port;
struct net_device *dev;
+ u16 untagged_vid = 0;
int err;
dev = alloc_etherdev(sizeof(struct rocker_port));
@@ -4621,20 +4840,19 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_port->port_number = port_number;
rocker_port->pport = port_number + 1;
rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+ INIT_LIST_HEAD(&rocker_port->trans_mem);
- rocker_port_dev_addr_init(rocker, rocker_port);
+ rocker_port_dev_addr_init(rocker_port);
dev->netdev_ops = &rocker_port_netdev_ops;
dev->ethtool_ops = &rocker_port_ethtool_ops;
- dev->swdev_ops = &rocker_port_swdev_ops;
+ dev->switchdev_ops = &rocker_port_switchdev_ops;
netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
NAPI_POLL_WEIGHT);
netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
NAPI_POLL_WEIGHT);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_NETNS_LOCAL |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_SWITCH_OFFLOAD;
+ dev->features |= NETIF_F_NETNS_LOCAL;
err = register_netdev(dev);
if (err) {
@@ -4643,18 +4861,29 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
}
rocker->ports[port_number] = rocker_port;
- rocker_port_set_learning(rocker_port);
+ rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
- rocker_port->internal_vlan_id =
- rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
- err = rocker_port_ig_tbl(rocker_port, 0);
+ err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
if (err) {
dev_err(&pdev->dev, "install ig port table failed\n");
goto err_port_ig_tbl;
}
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
+
+ err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
+ if (err) {
+ netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
+ goto err_untagged_vlan;
+ }
+
return 0;
+err_untagged_vlan:
+ rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
unregister_netdev(dev);
err_register_netdev:
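
The new err_untagged_vlan label extends the existing unwind ladder: each setup step gains a matching label, and a failure jumps to the label that undoes everything established before it, in reverse order. A standalone sketch of the ladder, with stand-in helpers and a forced failure:

	#include <stdio.h>

	/* Standalone sketch of the goto-unwind ladder extended above. */
	static int step(const char *what, int fail)
	{
		printf("setup %s\n", what);
		return fail ? -1 : 0;
	}

	static void undo(const char *what)
	{
		printf("undo %s\n", what);
	}

	static int probe(void)
	{
		int err;

		err = step("ig_tbl", 0);
		if (err)
			return err;
		err = step("untagged_vlan", 1);   /* force a failure */
		if (err)
			goto err_untagged_vlan;
		return 0;

	err_untagged_vlan:
		undo("ig_tbl");
		return err;
	}

	int main(void)
	{
		return probe() ? 1 : 0;
	}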
@@ -4669,7 +4898,7 @@ static int rocker_probe_ports(struct rocker *rocker)
int err;
alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
- rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+ rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
if (!rocker->ports)
return -ENOMEM;
for (i = 0; i < rocker->port_count; i++) {
@@ -4718,7 +4947,7 @@ err_enable_msix:
@@ -4718,7 +4947,7 @@ err_enable_msix:
return err;
}
-static void rocker_msix_fini(struct rocker *rocker)
+static void rocker_msix_fini(const struct rocker *rocker)
{
pci_disable_msix(rocker->pdev);
kfree(rocker->msix_entries);
@@ -4884,7 +5113,7 @@ static struct pci_driver rocker_pci_driver = {
* Net device notifier event handler
************************************/
-static bool rocker_port_dev_check(struct net_device *dev)
+static bool rocker_port_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &rocker_port_netdev_ops;
}
@@ -4892,45 +5121,54 @@ static bool rocker_port_dev_check(struct net_device *dev)
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
struct net_device *bridge)
{
+ u16 untagged_vid = 0;
int err;
+ /* Port is joining bridge, so the internal VLAN for the
+ * port is going to change to the bridge internal VLAN.
+ * Let's remove untagged VLAN (vid=0) from port and
+ * re-add once internal VLAN has changed.
+ */
+
+ err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
+ if (err)
+ return err;
+
rocker_port_internal_vlan_id_put(rocker_port,
rocker_port->dev->ifindex);
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
rocker_port->bridge_dev = bridge;
- /* Use bridge internal VLAN ID for untagged pkts */
- err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
- if (err)
- return err;
- rocker_port->internal_vlan_id =
- rocker_port_internal_vlan_id_get(rocker_port,
- bridge->ifindex);
- return rocker_port_vlan(rocker_port, 0, 0);
+ return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
}
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
+ u16 untagged_vid = 0;
int err;
- rocker_port_internal_vlan_id_put(rocker_port,
- rocker_port->bridge_dev->ifindex);
-
- rocker_port->bridge_dev = NULL;
-
- /* Use port internal VLAN ID for untagged pkts */
- err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
+ err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
if (err)
return err;
+
+ rocker_port_internal_vlan_id_put(rocker_port,
+ rocker_port->bridge_dev->ifindex);
rocker_port->internal_vlan_id =
rocker_port_internal_vlan_id_get(rocker_port,
rocker_port->dev->ifindex);
- err = rocker_port_vlan(rocker_port, 0, 0);
+
+ rocker_port->bridge_dev = NULL;
+
+ err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
if (err)
return err;
if (rocker_port->dev->flags & IFF_UP)
- err = rocker_port_fwd_enable(rocker_port);
+ err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE);
return err;
}
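
After this reordering, join and leave share one sequence: drop untagged VID 0 while the old internal VLAN is still current, release the old internal VLAN ID and take one keyed by the new ifindex (bridge on join, the port itself on leave), then re-add VID 0 under the new internal VLAN. A standalone simulation of that swap, with stand-in helpers:

	#include <stdio.h>

	/* Standalone simulation of the join/leave sequence above; the
	 * helpers stand in for the rocker_port_* calls. */
	static int internal_vlan;   /* plays rocker_port->internal_vlan_id */

	static int vid0_del(void)
	{
		printf("del vid0 (ivlan %d)\n", internal_vlan);
		return 0;
	}

	static int vid0_add(void)
	{
		printf("add vid0 (ivlan %d)\n", internal_vlan);
		return 0;
	}

	static int retarget(int new_ifindex)
	{
		int err = vid0_del();          /* 1: drop under old VLAN  */
		if (err)
			return err;
		internal_vlan = new_ifindex;   /* 2: swap internal VLAN ID */
		return vid0_add();             /* 3: re-add under new one  */
	}

	int main(void)
	{
		internal_vlan = 3;   /* port's own ifindex */
		retarget(7);         /* join: bridge ifindex */
		retarget(3);         /* leave: back to the port */
		return 0;
	}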
@@ -4992,7 +5230,8 @@ static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
__be32 ip_addr = *(__be32 *)n->primary_key;
- return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
+ return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
+ flags, ip_addr, n->ha);
}
static int rocker_netevent_event(struct notifier_block *unused,
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index a4e9591d7457..c61fbf968036 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -65,9 +65,9 @@ enum {
#define ROCKER_TEST_DMA_CTRL 0x0034
/* Rocker test register ctrl */
-#define ROCKER_TEST_DMA_CTRL_CLEAR (1 << 0)
-#define ROCKER_TEST_DMA_CTRL_FILL (1 << 1)
-#define ROCKER_TEST_DMA_CTRL_INVERT (1 << 2)
+#define ROCKER_TEST_DMA_CTRL_CLEAR BIT(0)
+#define ROCKER_TEST_DMA_CTRL_FILL BIT(1)
+#define ROCKER_TEST_DMA_CTRL_INVERT BIT(2)
/* Rocker DMA ring register offsets */
#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */
@@ -79,7 +79,7 @@ enum {
#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32)
/* Rocker dma ctrl register bits */
-#define ROCKER_DMA_DESC_CTRL_RESET (1 << 0)
+#define ROCKER_DMA_DESC_CTRL_RESET BIT(0)
/* Rocker DMA ring types */
enum rocker_dma_type {
@@ -111,7 +111,7 @@ struct rocker_desc {
u16 comp_err;
};
-#define ROCKER_DMA_DESC_COMP_ERR_GEN (1 << 15)
+#define ROCKER_DMA_DESC_COMP_ERR_GEN BIT(15)
/* Rocker DMA TLV struct */
struct rocker_tlv {
@@ -237,14 +237,14 @@ enum {
ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1,
};
-#define ROCKER_RX_FLAGS_IPV4 (1 << 0)
-#define ROCKER_RX_FLAGS_IPV6 (1 << 1)
-#define ROCKER_RX_FLAGS_CSUM_CALC (1 << 2)
-#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD (1 << 3)
-#define ROCKER_RX_FLAGS_IP_FRAG (1 << 4)
-#define ROCKER_RX_FLAGS_TCP (1 << 5)
-#define ROCKER_RX_FLAGS_UDP (1 << 6)
-#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD (1 << 7)
+#define ROCKER_RX_FLAGS_IPV4 BIT(0)
+#define ROCKER_RX_FLAGS_IPV6 BIT(1)
+#define ROCKER_RX_FLAGS_CSUM_CALC BIT(2)
+#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD BIT(3)
+#define ROCKER_RX_FLAGS_IP_FRAG BIT(4)
+#define ROCKER_RX_FLAGS_TCP BIT(5)
+#define ROCKER_RX_FLAGS_UDP BIT(6)
+#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7)
enum {
ROCKER_TLV_TX_UNSPEC,
@@ -460,6 +460,6 @@ enum rocker_of_dpa_overlay_type {
#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */
/* Rocker control bits */
-#define ROCKER_CONTROL_RESET (1 << 0)
+#define ROCKER_CONTROL_RESET BIT(0)
#endif
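
The header conversion above is purely cosmetic: BIT(n) from linux/bitops.h expands to a shifted one, but as an unsigned long shift, which also sidesteps signed-shift pitfalls at high bit positions. A standalone check, reproducing the kernel's definition:

	#include <assert.h>

	/* BIT() roughly as the kernel defines it (linux/bitops.h): same
	 * value as (1 << n), but an unsigned-long shift. */
	#define BIT(nr) (1UL << (nr))

	int main(void)
	{
		assert(BIT(0) == (1 << 0));
		assert(BIT(15) == (1 << 15)); /* ROCKER_DMA_DESC_COMP_ERR_GEN */
		return 0;
	}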
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 088921294448..4dd92b7b80f4 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -36,3 +36,12 @@ config SFC_SRIOV
This enables support for the SFC9000 I/O Virtualization
features, allowing accelerated network performance in
virtualized environments.
+config SFC_MCDI_LOGGING
+ bool "Solarflare SFC9000/SFC9100-family MCDI logging support"
+ depends on SFC
+ default y
+ ---help---
+ This enables support for tracing of MCDI (Management-Controller-to-
+ Driver-Interface) commands and responses, allowing debugging of
+ driver/firmware interaction. The tracing is actually enabled by
+ a sysfs file 'mcdi_logging' under the PCI device.
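
With this option built in, tracing is toggled at runtime through the 'mcdi_logging' file named in the help text. A hypothetical usage sketch in C; the exact PCI address (0000:01:00.0) is an illustrative assumption:

	#include <stdio.h>

	/* Enable MCDI tracing by writing "1" to the per-device sysfs
	 * file; the device path is an assumption for illustration. */
	int main(void)
	{
		const char *path =
			"/sys/bus/pci/devices/0000:01:00.0/mcdi_logging";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			return 1;
		}
		fputs("1\n", f);
		return fclose(f) ? 1 : 0;
	}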
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 3a83c0dca8e6..ce8470fe79d5 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -3,6 +3,6 @@ sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
mcdi.o mcdi_port.o mcdi_mon.o ptp.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
-sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
+sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index fbb6cfa0f5f1..847643455468 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -15,6 +15,7 @@
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
+#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
@@ -30,6 +31,9 @@ enum {
/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+/* The maximum size of a shared RSS context */
+/* TODO: this should really be from the mcdi protocol export */
+#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
/* The filter table(s) are managed by firmware and we have write-only
* access. When removing filters we must identify them to the
@@ -77,7 +81,6 @@ struct efx_ef10_filter_table {
/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
@@ -92,8 +95,49 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
- return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+ int bar;
+
+ bar = efx->type->mem_bar;
+ return resource_size(&efx->pci_dev->resource[bar]);
+}
+
+static int efx_ef10_get_pf_index(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+
+ nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
+ return 0;
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_get_vf_index(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+
+ nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
+ return 0;
}
+#endif
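
The two helpers above issue the same MC_CMD_GET_FUNCTION_INFO request and differ only in which output dword they keep (PF vs. VF index). A standalone sketch of the shared shape; the RPC is faked and field names are stand-ins:

	#include <stdio.h>

	/* Stand-in for the MCDI round trip shared by the PF/VF helpers. */
	struct fn_info { unsigned pf; unsigned vf; };

	static int get_function_info(struct fn_info *out)
	{
		out->pf = 0;   /* stands in for MCDI_DWORD(..._OUT_PF) */
		out->vf = 3;   /* stands in for MCDI_DWORD(..._OUT_VF) */
		return 0;
	}

	int main(void)
	{
		struct fn_info fi;

		if (get_function_info(&fi))
			return 1;
		printf("pf_index=%u vf_index=%u\n", fi.pf, fi.vf);
		return 0;
	}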
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
@@ -117,6 +161,13 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
nic_data->datapath_caps =
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+ /* record the DPCPU firmware IDs to determine VEB vswitching support.
+ */
+ nic_data->rx_dpcpu_fw_id =
+ MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
+ nic_data->tx_dpcpu_fw_id =
+ MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
+
if (!(nic_data->datapath_caps &
(1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
netif_err(efx, drv, efx->net_dev,
@@ -147,7 +198,7 @@ static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
return rc > 0 ? rc : -ERANGE;
}
-static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
size_t outlen;
@@ -167,9 +218,66 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
return 0;
}
+static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
+ size_t outlen;
+ int num_addrs, rc;
+
+ MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
+ return -EIO;
+
+ num_addrs = MCDI_DWORD(outbuf,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
+
+ WARN_ON(num_addrs != 1);
+
+ ether_addr_copy(mac_address,
+ MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));
+
+ return 0;
+}
+
+static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n",
+ ((efx->mcdi->fn_flags) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+ ? 1 : 0);
+}
+
+static ssize_t efx_ef10_show_primary_flag(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n",
+ ((efx->mcdi->fn_flags) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ ? 1 : 0);
+}
+
+static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
+ NULL);
+static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
+
static int efx_ef10_probe(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data;
+ struct net_device *net_dev = efx->net_dev;
int i, rc;
/* We can have one VI for each 8K region. However, until we
@@ -178,7 +286,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx->max_channels =
min_t(unsigned int,
EFX_MAX_CHANNELS,
- resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+ efx_ef10_mem_map_size(efx) /
(EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
if (WARN_ON(efx->max_channels == 0))
return -EIO;
@@ -188,6 +296,9 @@ static int efx_ef10_probe(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
+ /* we assume later that we can copy from this buffer in dwords */
+ BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
+
rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
if (rc)
@@ -209,6 +320,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
/* In case we're recovering from a crash (kexec), we want to
* cancel any outstanding request by the previous user of this
* function. We send a special message using the least
@@ -230,45 +343,85 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
+ rc = device_create_file(&efx->pci_dev->dev,
+ &dev_attr_link_control_flag);
+ if (rc)
+ goto fail3;
+
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+ if (rc)
+ goto fail4;
+
+ rc = efx_ef10_get_pf_index(efx);
+ if (rc)
+ goto fail5;
+
rc = efx_ef10_init_datapath_caps(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->rx_packet_len_offset =
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->port_num = rc;
+ net_dev->dev_port = rc;
- rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+ rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
if (rc)
- goto fail3;
+ goto fail5;
rc = efx_ef10_get_sysclk_freq(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
- /* Check whether firmware supports bug 35388 workaround */
+ /* Check whether firmware supports bug 35388 workaround.
+ * First try to enable it, then if we get EPERM, just
+ * ask if it's already enabled
+ */
rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
- if (rc == 0)
+ if (rc == 0) {
nic_data->workaround_35388 = true;
- else if (rc != -ENOSYS && rc != -ENOENT)
- goto fail3;
+ } else if (rc == -EPERM) {
+ unsigned int enabled;
+
+ rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
+ if (rc)
+ goto fail3;
+ nic_data->workaround_35388 = enabled &
+ MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
+ } else if (rc != -ENOSYS && rc != -ENOENT) {
+ goto fail5;
+ }
netif_dbg(efx, probe, efx->net_dev,
"workaround for bug 35388 is %sabled\n",
nic_data->workaround_35388 ? "en" : "dis");
rc = efx_mcdi_mon_probe(efx);
- if (rc)
- goto fail3;
+ if (rc && rc != -EPERM)
+ goto fail5;
efx_ptp_probe(efx, NULL);
+#ifdef CONFIG_SFC_SRIOV
+ if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+ efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
+ } else
+#endif
+ ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
+
return 0;
+fail5:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+fail4:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
efx_mcdi_fini(efx);
fail2:
@@ -281,7 +434,7 @@ fail1:
static int efx_ef10_free_vis(struct efx_nic *efx)
{
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
size_t outlen;
int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
outbuf, sizeof(outbuf), &outlen);
@@ -352,9 +505,9 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- MCDI_DECLARE_BUF(inbuf,
- max(MC_CMD_LINK_PIOBUF_IN_LEN,
- MC_CMD_UNLINK_PIOBUF_IN_LEN));
+ _MCDI_DECLARE_BUF(inbuf,
+ max(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_IN_LEN));
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
unsigned int offset, index;
@@ -363,6 +516,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+ memset(inbuf, 0, sizeof(inbuf));
+
/* Link a buffer to each VI in the write-combining mapping */
for (index = 0; index < nic_data->n_piobufs; ++index) {
MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
@@ -475,6 +630,25 @@ static void efx_ef10_remove(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
+#ifdef CONFIG_SFC_SRIOV
+ struct efx_ef10_nic_data *nic_data_pf;
+ struct pci_dev *pci_dev_pf;
+ struct efx_nic *efx_pf;
+ struct ef10_vf *vf;
+
+ if (efx->pci_dev->is_virtfn) {
+ pci_dev_pf = efx->pci_dev->physfn;
+ if (pci_dev_pf) {
+ efx_pf = pci_get_drvdata(pci_dev_pf);
+ nic_data_pf = efx_pf->nic_data;
+ vf = nic_data_pf->vf + nic_data->vf_index;
+ vf->efx = NULL;
+ } else
+ netif_info(efx, drv, efx->net_dev,
+ "Could not get the PF id from VF\n");
+ }
+#endif
+
efx_ptp_remove(efx);
efx_mcdi_mon_remove(efx);
@@ -490,11 +664,78 @@ static void efx_ef10_remove(struct efx_nic *efx)
if (!nic_data->must_restore_piobufs)
efx_ef10_free_piobufs(efx);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
+
efx_mcdi_fini(efx);
efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
kfree(nic_data);
}
+static int efx_ef10_probe_pf(struct efx_nic *efx)
+{
+ return efx_ef10_probe(efx);
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_probe_vf(struct efx_nic *efx)
+{
+ int rc;
+ struct pci_dev *pci_dev_pf;
+
+ /* If the parent PF has no VF data structure, it doesn't know about this
+ * VF so fail probe. The VF needs to be re-created. This can happen
+ * if the PF driver is unloaded while the VF is assigned to a guest.
+ */
+ pci_dev_pf = efx->pci_dev->physfn;
+ if (pci_dev_pf) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
+
+ if (!nic_data_pf->vf) {
+ netif_info(efx, drv, efx->net_dev,
+ "The VF cannot link to its parent PF; "
+ "please destroy and re-create the VF\n");
+ return -EBUSY;
+ }
+ }
+
+ rc = efx_ef10_probe(efx);
+ if (rc)
+ return rc;
+
+ rc = efx_ef10_get_vf_index(efx);
+ if (rc)
+ goto fail;
+
+ if (efx->pci_dev->is_virtfn) {
+ if (efx->pci_dev->physfn) {
+ struct efx_nic *efx_pf =
+ pci_get_drvdata(efx->pci_dev->physfn);
+ struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ nic_data_p->vf[nic_data->vf_index].efx = efx;
+ nic_data_p->vf[nic_data->vf_index].pci_dev =
+ efx->pci_dev;
+ } else
+ netif_info(efx, drv, efx->net_dev,
+ "Could not get the PF id from VF\n");
+ }
+
+ return 0;
+
+fail:
+ efx_ef10_remove(efx);
+ return rc;
+}
+#else
+static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
+{
+ return 0;
+}
+#endif
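
When CONFIG_SFC_SRIOV is off, efx_ef10_probe_vf() compiles to a trivial stub so call sites need no conditionals, and __attribute__((unused)) keeps the untouched parameter from tripping -Wunused-parameter. A standalone illustration of the same config-stub pattern, with stand-in names:

	#include <stdio.h>

	/* Same call site compiles either way; only the definition varies. */
	#ifdef WITH_FEATURE
	static int probe_vf(int *dev)
	{
		printf("real probe %d\n", *dev);
		return 0;
	}
	#else
	static int probe_vf(int *dev __attribute__((unused)))
	{
		return 0;   /* feature compiled out: succeed, do nothing */
	}
	#endif

	int main(void)
	{
		int dev = 42;

		return probe_vf(&dev);
	}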
+
static int efx_ef10_alloc_vis(struct efx_nic *efx,
unsigned int min_vis, unsigned int max_vis)
{
@@ -687,7 +928,9 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
- efx_ef10_rx_push_rss_config(efx);
+ /* don't fail init if RSS setup doesn't work */
+ efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+
return 0;
}
@@ -702,6 +945,14 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
+static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
+{
+ if (reason == RESET_TYPE_MC_FAILURE)
+ return RESET_TYPE_DATAPATH;
+
+ return efx_mcdi_map_reset_reason(reason);
+}
+
static int efx_ef10_map_reset_flags(u32 *flags)
{
enum {
@@ -760,93 +1011,112 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
- EF10_DMA_STAT(tx_bytes, TX_BYTES),
- EF10_DMA_STAT(tx_packets, TX_PKTS),
- EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
- EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
- EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
- EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
- EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
- EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
- EF10_DMA_STAT(tx_64, TX_64_PKTS),
- EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
- EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
- EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
- EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
- EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
- EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
- EF10_DMA_STAT(rx_bytes, RX_BYTES),
- EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
- EF10_OTHER_STAT(rx_good_bytes),
- EF10_OTHER_STAT(rx_bad_bytes),
- EF10_DMA_STAT(rx_packets, RX_PKTS),
- EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
- EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
- EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
- EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
- EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
- EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
- EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
- EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
- EF10_DMA_STAT(rx_64, RX_64_PKTS),
- EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
- EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
- EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
- EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
- EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
- EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
- EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
- EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
- EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
- EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
- EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
- EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+ EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
+ EF10_DMA_STAT(port_tx_packets, TX_PKTS),
+ EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
+ EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
+ EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
+ EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
+ EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
+ EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
+ EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
+ EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
+ EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
+ EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
+ EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
+ EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+ EF10_OTHER_STAT(port_rx_good_bytes),
+ EF10_OTHER_STAT(port_rx_bad_bytes),
+ EF10_DMA_STAT(port_rx_packets, RX_PKTS),
+ EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
+ EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
+ EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
+ EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
+ EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
+ EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
+ EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
+ EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
+ EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
+ EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
+ EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
+ EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
+ EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
+ EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
GENERIC_SW_STAT(rx_nodesc_trunc),
GENERIC_SW_STAT(rx_noskb_drops),
- EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
- EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
- EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
- EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
- EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
- EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
- EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
- EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
- EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
- EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
- EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
- EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+ EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+ EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+ EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+ EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+ EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
+ EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
+ EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+ EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+ EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+ EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+ EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
+ EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
+ EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
+ EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
+ EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
+ EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
+ EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
+ EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
+ EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
+ EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
+ EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
+ EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
+ EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
+ EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
+ EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
+ EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
+ EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
+ EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
+ EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
+ EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
};
-#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
- (1ULL << EF10_STAT_tx_packets) | \
- (1ULL << EF10_STAT_tx_pause) | \
- (1ULL << EF10_STAT_tx_unicast) | \
- (1ULL << EF10_STAT_tx_multicast) | \
- (1ULL << EF10_STAT_tx_broadcast) | \
- (1ULL << EF10_STAT_rx_bytes) | \
- (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
- (1ULL << EF10_STAT_rx_good_bytes) | \
- (1ULL << EF10_STAT_rx_bad_bytes) | \
- (1ULL << EF10_STAT_rx_packets) | \
- (1ULL << EF10_STAT_rx_good) | \
- (1ULL << EF10_STAT_rx_bad) | \
- (1ULL << EF10_STAT_rx_pause) | \
- (1ULL << EF10_STAT_rx_control) | \
- (1ULL << EF10_STAT_rx_unicast) | \
- (1ULL << EF10_STAT_rx_multicast) | \
- (1ULL << EF10_STAT_rx_broadcast) | \
- (1ULL << EF10_STAT_rx_lt64) | \
- (1ULL << EF10_STAT_rx_64) | \
- (1ULL << EF10_STAT_rx_65_to_127) | \
- (1ULL << EF10_STAT_rx_128_to_255) | \
- (1ULL << EF10_STAT_rx_256_to_511) | \
- (1ULL << EF10_STAT_rx_512_to_1023) | \
- (1ULL << EF10_STAT_rx_1024_to_15xx) | \
- (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
- (1ULL << EF10_STAT_rx_gtjumbo) | \
- (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
- (1ULL << EF10_STAT_rx_overflow) | \
- (1ULL << EF10_STAT_rx_nodesc_drops) | \
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
+ (1ULL << EF10_STAT_port_tx_packets) | \
+ (1ULL << EF10_STAT_port_tx_pause) | \
+ (1ULL << EF10_STAT_port_tx_unicast) | \
+ (1ULL << EF10_STAT_port_tx_multicast) | \
+ (1ULL << EF10_STAT_port_tx_broadcast) | \
+ (1ULL << EF10_STAT_port_rx_bytes) | \
+ (1ULL << \
+ EF10_STAT_port_rx_bytes_minus_good_bytes) | \
+ (1ULL << EF10_STAT_port_rx_good_bytes) | \
+ (1ULL << EF10_STAT_port_rx_bad_bytes) | \
+ (1ULL << EF10_STAT_port_rx_packets) | \
+ (1ULL << EF10_STAT_port_rx_good) | \
+ (1ULL << EF10_STAT_port_rx_bad) | \
+ (1ULL << EF10_STAT_port_rx_pause) | \
+ (1ULL << EF10_STAT_port_rx_control) | \
+ (1ULL << EF10_STAT_port_rx_unicast) | \
+ (1ULL << EF10_STAT_port_rx_multicast) | \
+ (1ULL << EF10_STAT_port_rx_broadcast) | \
+ (1ULL << EF10_STAT_port_rx_lt64) | \
+ (1ULL << EF10_STAT_port_rx_64) | \
+ (1ULL << EF10_STAT_port_rx_65_to_127) | \
+ (1ULL << EF10_STAT_port_rx_128_to_255) | \
+ (1ULL << EF10_STAT_port_rx_256_to_511) | \
+ (1ULL << EF10_STAT_port_rx_512_to_1023) |\
+ (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
+ (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
+ (1ULL << EF10_STAT_port_rx_gtjumbo) | \
+ (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
+ (1ULL << EF10_STAT_port_rx_overflow) | \
+ (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
(1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
(1ULL << GENERIC_STAT_rx_noskb_drops))
@@ -854,39 +1124,39 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
* switchable port we do not expose these because they might not
* include all the packets they should.
*/
-#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
- (1ULL << EF10_STAT_tx_lt64) | \
- (1ULL << EF10_STAT_tx_64) | \
- (1ULL << EF10_STAT_tx_65_to_127) | \
- (1ULL << EF10_STAT_tx_128_to_255) | \
- (1ULL << EF10_STAT_tx_256_to_511) | \
- (1ULL << EF10_STAT_tx_512_to_1023) | \
- (1ULL << EF10_STAT_tx_1024_to_15xx) | \
- (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
+ (1ULL << EF10_STAT_port_tx_lt64) | \
+ (1ULL << EF10_STAT_port_tx_64) | \
+ (1ULL << EF10_STAT_port_tx_65_to_127) |\
+ (1ULL << EF10_STAT_port_tx_128_to_255) |\
+ (1ULL << EF10_STAT_port_tx_256_to_511) |\
+ (1ULL << EF10_STAT_port_tx_512_to_1023) |\
+ (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
+ (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
/* These statistics are only provided by the 40G MAC. For a 10G/40G
* switchable port we do expose these because the errors will otherwise
* be silent.
*/
-#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
- (1ULL << EF10_STAT_rx_length_error))
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
+ (1ULL << EF10_STAT_port_rx_length_error))
/* These statistics are only provided if the firmware supports the
* capability PM_AND_RXDP_COUNTERS.
*/
#define HUNT_PM_AND_RXDP_STAT_MASK ( \
- (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
- (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
- (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
- (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
- (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
- (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
- (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
- (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
- (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
- (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
- (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \
- (1ULL << EF10_STAT_rx_dp_hlb_wait))
+ (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \
+ (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \
+ (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \
+ (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
+ (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
@@ -894,6 +1164,10 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
u32 port_caps = efx_mcdi_phy_get_caps(efx);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ if (!(efx->mcdi->fn_flags &
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+ return 0;
+
if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
else
@@ -908,13 +1182,28 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
- u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 raw_mask[2];
+
+ raw_mask[0] = efx_ef10_raw_stat_mask(efx);
+
+ /* Only show vadaptor stats when EVB capability is present */
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
+ raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
+ raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+ } else {
+ raw_mask[1] = 0;
+ }
#if BITS_PER_LONG == 64
- mask[0] = raw_mask;
+ mask[0] = raw_mask[0];
+ mask[1] = raw_mask[1];
#else
- mask[0] = raw_mask & 0xffffffff;
- mask[1] = raw_mask >> 32;
+ mask[0] = raw_mask[0] & 0xffffffff;
+ mask[1] = raw_mask[0] >> 32;
+ mask[2] = raw_mask[1] & 0xffffffff;
+ mask[3] = raw_mask[1] >> 32;
#endif
}
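
The stat set has outgrown a single u64, so the raw mask becomes u64[2] and is spread into the unsigned-long bitmap words: one word per u64 on 64-bit, two 32-bit words per u64 on 32-bit. A standalone check of that spreading for both long widths:

	#include <stdio.h>
	#include <stdint.h>
	#include <limits.h>

	/* Spread two raw 64-bit masks into an unsigned-long bitmap, as
	 * efx_ef10_get_stat_mask() does above. */
	static void spread(const uint64_t raw[2], unsigned long *mask)
	{
	#if ULONG_MAX == 0xffffffffffffffffUL
		mask[0] = raw[0];
		mask[1] = raw[1];
	#else
		mask[0] = raw[0] & 0xffffffff;
		mask[1] = raw[0] >> 32;
		mask[2] = raw[1] & 0xffffffff;
		mask[3] = raw[1] >> 32;
	#endif
	}

	int main(void)
	{
		uint64_t raw[2] = { 0x00000001deadbeefULL, 0x5ULL };
		unsigned long mask[4] = { 0, 0, 0, 0 };

		spread(raw, mask);
		printf("%lx %lx\n", mask[0], mask[1]);
		return 0;
	}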
@@ -927,7 +1216,51 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
mask, names);
}
-static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ size_t stats_count = 0, index;
+
+ efx_ef10_get_stat_mask(efx, mask);
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+ if (efx_ef10_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
+ stats[EF10_STAT_rx_multicast] +
+ stats[EF10_STAT_rx_broadcast];
+ core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
+ stats[EF10_STAT_tx_multicast] +
+ stats[EF10_STAT_tx_broadcast];
+ core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
+ stats[EF10_STAT_rx_multicast_bytes] +
+ stats[EF10_STAT_rx_broadcast_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
+ stats[EF10_STAT_tx_multicast_bytes] +
+ stats[EF10_STAT_tx_broadcast_bytes];
+ core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF10_STAT_rx_multicast];
+ core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+ core_stats->rx_errors = core_stats->rx_crc_errors;
+ core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+ }
+
+ return stats_count;
+}
+
+static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
DECLARE_BITMAP(mask, EF10_STAT_COUNT);
@@ -952,67 +1285,114 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
- efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
- stats[EF10_STAT_rx_good_bytes] =
- stats[EF10_STAT_rx_bytes] -
- stats[EF10_STAT_rx_bytes_minus_good_bytes];
- efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
- stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[EF10_STAT_port_rx_nodesc_drops]);
+ stats[EF10_STAT_port_rx_good_bytes] =
+ stats[EF10_STAT_port_rx_bytes] -
+ stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
+ efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
+ stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
efx_update_sw_stats(efx, stats);
return 0;
}
-static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
- struct rtnl_link_stats64 *core_stats)
+static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
- DECLARE_BITMAP(mask, EF10_STAT_COUNT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- u64 *stats = nic_data->stats;
- size_t stats_count = 0, index;
int retry;
- efx_ef10_get_stat_mask(efx, mask);
-
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us)
*/
for (retry = 0; retry < 100; ++retry) {
- if (efx_ef10_try_update_nic_stats(efx) == 0)
+ if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
break;
udelay(100);
}
- if (full_stats) {
- for_each_set_bit(index, mask, EF10_STAT_COUNT) {
- if (efx_ef10_stat_desc[index].name) {
- *full_stats++ = stats[index];
- ++stats_count;
- }
- }
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
+}
+
+static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ __le64 generation_start, generation_end;
+ u64 *stats = nic_data->stats;
+ u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+ struct efx_buffer stats_buf;
+ __le64 *dma_stats;
+ int rc;
+
+ spin_unlock_bh(&efx->stats_lock);
+
+ if (in_interrupt()) {
+ /* If in atomic context, cannot update stats. Just update the
+ * software stats and return so the caller can continue.
+ */
+ spin_lock_bh(&efx->stats_lock);
+ efx_update_sw_stats(efx, stats);
+ return 0;
}
- if (core_stats) {
- core_stats->rx_packets = stats[EF10_STAT_rx_packets];
- core_stats->tx_packets = stats[EF10_STAT_tx_packets];
- core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
- core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
- core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] +
- stats[GENERIC_STAT_rx_nodesc_trunc] +
- stats[GENERIC_STAT_rx_noskb_drops];
- core_stats->multicast = stats[EF10_STAT_rx_multicast];
- core_stats->rx_length_errors =
- stats[EF10_STAT_rx_gtjumbo] +
- stats[EF10_STAT_rx_length_error];
- core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
- core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
- core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
- core_stats->rx_errors = (core_stats->rx_length_errors +
- core_stats->rx_crc_errors +
- core_stats->rx_frame_errors);
+ efx_ef10_get_stat_mask(efx, mask);
+
+ rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
+ if (rc) {
+ spin_lock_bh(&efx->stats_lock);
+ return rc;
}
- return stats_count;
+ dma_stats = stats_buf.addr;
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+ MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
+ MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, 1);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ spin_lock_bh(&efx->stats_lock);
+ if (rc) {
+ /* Expect ENOENT if DMA queues have not been set up */
+ if (rc != -ENOENT || atomic_read(&efx->active_queues))
+ efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
+ sizeof(inbuf), NULL, 0, rc);
+ goto out;
+ }
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
+ WARN_ON_ONCE(1);
+ goto out;
+ }
+ rmb();
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
+ stats, stats_buf.addr, false);
+ rmb();
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ efx_update_sw_stats(efx, stats);
+out:
+ efx_nic_free_buffer(efx, &stats_buf);
+ return rc;
+}
+
+static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ if (efx_ef10_try_update_nic_stats_vf(efx))
+ return 0;
+
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}
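
The VF stats DMA above uses a seqcount-like consistency check: read GENERATION_END, copy the stats, read GENERATION_START, and retry with -EAGAIN if they differ, with rmb() keeping the copy from being reordered around the counter reads. A single-threaded standalone model of the check (no barriers needed here); the writer is assumed to bump generation_start before and generation_end after updating:

	#include <stdio.h>
	#include <stdint.h>

	/* Model of the generation check: a torn read shows
	 * generation_start != generation_end and the reader retries. */
	struct dma_stats {
		uint64_t generation_end;
		uint64_t value;
		uint64_t generation_start;
	};

	static int try_read(const struct dma_stats *s, uint64_t *out)
	{
		uint64_t end = s->generation_end;

		*out = s->value;                  /* copy the payload */
		if (s->generation_start != end)
			return -1;   /* torn: caller retries (-EAGAIN) */
		return 0;
	}

	int main(void)
	{
		struct dma_stats s = { 7, 1234, 7 };
		uint64_t v;

		if (try_read(&s, &v) == 0)
			printf("stats %llu\n", (unsigned long long)v);
		return 0;
	}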
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
@@ -1044,6 +1424,14 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
}
}
+static void efx_ef10_get_wol_vf(struct efx_nic *efx,
+ struct ethtool_wolinfo *wol) {}
+
+static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
+{
+ return -EOPNOTSUPP;
+}
+
static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
wol->supported = 0;
@@ -1123,13 +1511,17 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
/* All our allocations have been reset */
efx_ef10_reset_mc_allocations(efx);
+ /* Driver-created vswitches and vports must be re-created */
+ nic_data->must_probe_vswitching = true;
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
/* The datapath firmware might have been changed */
nic_data->must_check_datapath_caps = true;
/* MAC statistics have been cleared on the NIC; clear the local
* statistic that we update with efx_update_diff_stat().
*/
- nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+ nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
return -EIO;
}
@@ -1232,16 +1624,17 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
- size_t inlen, outlen;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t inlen;
dma_addr_t dma_addr;
efx_qword_t *txd;
int rc;
int i;
+ BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
@@ -1251,7 +1644,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = tx_queue->txd.buf.dma_addr;
@@ -1266,7 +1659,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
- outbuf, sizeof(outbuf), &outlen);
+ NULL, 0, NULL);
if (rc)
goto fail;
@@ -1299,7 +1692,7 @@ fail:
static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = tx_queue->efx;
size_t outlen;
int rc;
@@ -1378,19 +1771,33 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
}
}
-static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
+static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
+ bool exclusive, unsigned *context_size)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
size_t outlen;
int rc;
+ u32 alloc_type = exclusive ?
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ unsigned rss_spread = exclusive ?
+ efx->rss_spread :
+ min(rounddown_pow_of_two(efx->rss_spread),
+ EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
+
+ if (!exclusive && rss_spread == 1) {
+ *context = EFX_EF10_RSS_CONTEXT_INVALID;
+ if (context_size)
+ *context_size = 1;
+ return 0;
+ }
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
- EVB_PORT_ID_ASSIGNED);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
- MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
- MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
- EFX_MAX_CHANNELS);
+ nic_data->vport_id);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
@@ -1402,6 +1809,9 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ if (context_size)
+ *context_size = rss_spread;
+
return 0;
}
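
A note on the sizing logic above: an exclusive RSS context is requested with the full rss_spread, while a shared one is rounded down to a power of two and capped at EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE, and is skipped entirely when that would leave a single queue (one queue needs no spreading). A minimal userspace sketch of the clamp follows, assuming a cap of 64 since the constant's value is not visible in this hunk.

/* Sketch of the shared-context sizing in efx_ef10_alloc_rss_context();
 * 64 stands in for EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE (assumption).
 */
#include <stdio.h>

static unsigned rounddown_pow_of_two(unsigned n)
{
	unsigned p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned spread;

	for (spread = 1; spread <= 96; spread += 19) {
		unsigned shared = rounddown_pow_of_two(spread);

		if (shared > 64)
			shared = 64;
		printf("rss_spread %2u -> shared context size %2u\n",
		       spread, shared);
	}
	return 0;
}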
@@ -1418,7 +1828,8 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
WARN_ON(rc != 0);
}
-static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
+static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
+ const u32 *rx_indir_table)
{
MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
@@ -1432,7 +1843,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
MCDI_PTR(tablebuf,
RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
- (u8) efx->rx_indir_table[i];
+ (u8) rx_indir_table[i];
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
sizeof(tablebuf), NULL, 0, NULL);
@@ -1460,27 +1871,119 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
+static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
+ unsigned *context_size)
{
+ u32 new_rx_rss_context;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- int rc;
+ int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+ false, context_size);
- netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
+ if (rc != 0)
+ return rc;
- if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
- rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
- if (rc != 0)
- goto fail;
+ nic_data->rx_rss_context = new_rx_rss_context;
+ nic_data->rx_rss_context_exclusive = false;
+ efx_set_default_rx_indir_table(efx);
+ return 0;
+}
+
+static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
+ const u32 *rx_indir_table)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+ u32 new_rx_rss_context;
+
+ if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
+ !nic_data->rx_rss_context_exclusive) {
+ rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+ true, NULL);
+ if (rc == -EOPNOTSUPP)
+ return rc;
+ else if (rc != 0)
+ goto fail1;
+ } else {
+ new_rx_rss_context = nic_data->rx_rss_context;
}
- rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
+ rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
+ rx_indir_table);
if (rc != 0)
- goto fail;
+ goto fail2;
- return;
+ if (nic_data->rx_rss_context != new_rx_rss_context)
+ efx_ef10_rx_free_indir_table(efx);
+ nic_data->rx_rss_context = new_rx_rss_context;
+ nic_data->rx_rss_context_exclusive = true;
+ if (rx_indir_table != efx->rx_indir_table)
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
+ return 0;
-fail:
+fail2:
+ if (new_rx_rss_context != nic_data->rx_rss_context)
+ efx_ef10_free_rss_context(efx, new_rx_rss_context);
+fail1:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
+{
+ int rc;
+
+ if (efx->rss_spread == 1)
+ return 0;
+
+ rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
+
+ if (rc == -ENOBUFS && !user) {
+ unsigned context_size;
+ bool mismatch = false;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
+ i++)
+ mismatch = rx_indir_table[i] !=
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
+
+ rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
+ if (rc == 0) {
+ if (context_size != efx->rss_spread)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one of"
+ " different size."
+ " Wanted %u, got %u.\n",
+ efx->rss_spread, context_size);
+ else if (mismatch)
+ netif_warn(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one but"
+ " could not apply custom"
+ " indirection.\n");
+ else
+ netif_info(efx, probe, efx->net_dev,
+ "Could not allocate an exclusive RSS"
+ " context; allocated a shared one.\n");
+ }
+ }
+ return rc;
+}
+
+static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table
+ __attribute__ ((unused)))
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (user)
+ return -EOPNOTSUPP;
+ if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+ return 0;
+ return efx_ef10_rx_push_shared_rss_config(efx, NULL);
}
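
Worth pausing on efx_ef10_pf_rx_push_rss_config() above: the PF first tries an exclusive context and, if the firmware returns -ENOBUFS on a non-user request, falls back to a shared one, downgrading the warning when the requested table was the default anyway. Since ethtool_rxfh_indir_default() in include/linux/ethtool.h reduces to index % n_rx_rings, the mismatch test is easy to reproduce in userspace:

/* Reproduces the "custom indirection?" test from
 * efx_ef10_pf_rx_push_rss_config() for a 128-entry table.
 */
#include <stdio.h>

#define TABLE_SIZE 128

int main(void)
{
	unsigned rx_indir_table[TABLE_SIZE];
	unsigned rss_spread = 4;
	int mismatch = 0;
	unsigned i;

	for (i = 0; i < TABLE_SIZE; i++)	/* default layout */
		rx_indir_table[i] = i % rss_spread;

	rx_indir_table[7] = 1;			/* a user tweak */

	for (i = 0; i < TABLE_SIZE && !mismatch; i++)
		mismatch = rx_indir_table[i] != i % rss_spread;

	printf("custom indirection in use: %s\n", mismatch ? "yes" : "no");
	return 0;
}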
static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
@@ -1496,14 +1999,15 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
MCDI_DECLARE_BUF(inbuf,
MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
- MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
struct efx_nic *efx = rx_queue->efx;
- size_t inlen, outlen;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t inlen;
dma_addr_t dma_addr;
int rc;
int i;
+ BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
rx_queue->scatter_n = 0;
rx_queue->scatter_len = 0;
@@ -1517,7 +2021,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
INIT_RXQ_IN_FLAG_PREFIX, 1,
INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
- MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
dma_addr = rx_queue->rxd.buf.dma_addr;
@@ -1532,7 +2036,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
- outbuf, sizeof(outbuf), &outlen);
+ NULL, 0, NULL);
if (rc)
netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
efx_rx_queue_index(rx_queue));
@@ -1541,7 +2045,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = rx_queue->efx;
size_t outlen;
int rc;
@@ -1703,7 +2207,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
static void efx_ef10_ev_fini(struct efx_channel *channel)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
struct efx_nic *efx = channel->efx;
size_t outlen;
int rc;
@@ -2286,11 +2790,12 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
match_fields);
}
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
@@ -3055,6 +3560,9 @@ fail:
return rc;
}
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
static void efx_ef10_filter_table_restore(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3064,9 +3572,14 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
bool failed = false;
int rc;
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
if (!nic_data->must_restore_filters)
return;
+ if (!table)
+ return;
+
spin_lock_bh(&efx->filter_lock);
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
@@ -3102,6 +3615,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
nic_data->must_restore_filters = false;
}
+/* Caller must hold efx->filter_sem for write */
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3110,6 +3624,10 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
unsigned int filter_idx;
int rc;
+ efx->filter_state = NULL;
+ if (!table)
+ return;
+
for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (!spec)
@@ -3135,6 +3653,9 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
kfree(table);
}
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3149,6 +3670,9 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
if (!efx_dev_registered(efx))
return;
+ if (!table)
+ return;
+
/* Mark old filters that may need to be removed */
spin_lock_bh(&efx->filter_lock);
n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
@@ -3280,6 +3804,78 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
WARN_ON(remove_failed);
}
+static int efx_ef10_set_mac_address(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ bool was_enabled = efx->port_enabled;
+ int rc;
+
+ efx_device_detach_sync(efx);
+ efx_net_stop(efx->net_dev);
+ down_write(&efx->filter_sem);
+ efx_ef10_filter_table_remove(efx);
+
+ ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
+ efx->net_dev->dev_addr);
+ MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+ nic_data->vport_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+
+ efx_ef10_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (was_enabled)
+ efx_net_open(efx->net_dev);
+ netif_device_attach(efx->net_dev);
+
+#if !defined(CONFIG_SFC_SRIOV)
+ if (rc == -EPERM)
+ netif_err(efx, drv, efx->net_dev,
+ "Cannot change MAC address; use sfboot to enable mac-spoofing"
+ " on this interface\n");
+#else
+ if (rc == -EPERM) {
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+
+ /* Switch to PF and change MAC address on vport */
+ if (efx->pci_dev->is_virtfn && pci_dev_pf) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+ if (!efx_ef10_sriov_set_vf_mac(efx_pf,
+ nic_data->vf_index,
+ efx->net_dev->dev_addr))
+ return 0;
+ }
+ netif_err(efx, drv, efx->net_dev,
+ "Cannot change MAC address; use sfboot to enable mac-spoofing"
+ " on this interface\n");
+ } else if (efx->pci_dev->is_virtfn) {
+ /* Successfully changed by VF (with MAC spoofing), so update the
+ * parent PF if possible.
+ */
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+
+ if (pci_dev_pf) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
+ unsigned int i;
+
+ for (i = 0; i < efx_pf->vf_count; ++i) {
+ struct ef10_vf *vf = nic_data->vf + i;
+
+ if (vf->efx == efx) {
+ ether_addr_copy(vf->mac,
+ efx->net_dev->dev_addr);
+ return 0;
+ }
+ }
+ }
+ }
+#endif
+ return rc;
+}
+
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
efx_ef10_filter_sync_rx_mode(efx);
@@ -3287,6 +3883,13 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
return efx_mcdi_set_mac(efx);
}
+static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
+{
+ efx_ef10_filter_sync_rx_mode(efx);
+
+ return 0;
+}
+
static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
@@ -3494,6 +4097,9 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}
+static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
+ u32 host_time) {}
+
static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
bool temp)
{
@@ -3571,6 +4177,12 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
return 0;
}
+static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
+ struct hwtstamp_config *init)
+{
+ return -EOPNOTSUPP;
+}
+
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
struct hwtstamp_config *init)
{
@@ -3607,14 +4219,118 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
}
}
+const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
+ .is_vf = true,
+ .mem_bar = EFX_MEM_VF_BAR,
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe_vf,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .map_reset_reason = efx_ef10_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_ef10_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_ef10_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats_vf,
+ .start_stats = efx_port_dummy_op_void,
+ .pull_stats = efx_port_dummy_op_void,
+ .stop_stats = efx_port_dummy_op_void,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure_vf,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol_vf,
+ .set_wol = efx_ef10_set_wol_vf,
+ .resume_wol = efx_port_dummy_op_void,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .irq_handle_legacy = efx_ef10_legacy_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_remove = efx_ef10_tx_remove,
+ .tx_write = efx_ef10_tx_write,
+ .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
+ .rx_probe = efx_ef10_rx_probe,
+ .rx_init = efx_ef10_rx_init,
+ .rx_remove = efx_ef10_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .ev_probe = efx_ef10_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_ef10_ev_fini,
+ .ev_remove = efx_ef10_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_ef10_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
+ .filter_insert = efx_ef10_filter_insert,
+ .filter_remove_safe = efx_ef10_filter_remove_safe,
+ .filter_get_safe = efx_ef10_filter_get_safe,
+ .filter_clear_rx = efx_ef10_filter_clear_rx,
+ .filter_count_rx_used = efx_ef10_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_ef10_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_port_dummy_op_int,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
+ .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_ef10_vswitching_probe_vf,
+ .vswitching_restore = efx_ef10_vswitching_restore_vf,
+ .vswitching_remove = efx_ef10_vswitching_remove_vf,
+ .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_vf,
+ .set_mac_address = efx_ef10_set_mac_address,
+
+ .revision = EFX_REV_HUNT_A0,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+ .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+ 1 << HWTSTAMP_FILTER_ALL,
+};
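
The VF ops table above swaps in stubs — efx_port_dummy_op_void for actions a VF may simply skip, and *_vf variants returning -EOPNOTSUPP for actions it must refuse — so the core driver calls through efx_nic_type without ever branching on is_vf. A compact userspace sketch of the pattern, under hypothetical names:

#include <stdio.h>

/* Hypothetical ops table mirroring the efx_nic_type idiom: PF and VF
 * share the struct layout; the VF substitutes no-op/-EOPNOTSUPP stubs.
 */
struct nic_ops {
	int  (*set_wol)(int type);
	void (*start_stats)(void);
};

static int pf_set_wol(int type) { printf("PF: WoL type %d\n", type); return 0; }
static void pf_start_stats(void) { printf("PF: stats started\n"); }

static int vf_set_wol(int type) { (void)type; return -95; /* -EOPNOTSUPP on Linux */ }
static void dummy_op_void(void) { }

static const struct nic_ops pf_ops = { pf_set_wol, pf_start_stats };
static const struct nic_ops vf_ops = { vf_set_wol, dummy_op_void };

int main(void)
{
	const struct nic_ops *ops = &vf_ops;

	ops->start_stats();			/* silently does nothing */
	printf("VF set_wol -> %d\n", ops->set_wol(1));

	ops = &pf_ops;
	ops->start_stats();
	printf("PF set_wol -> %d\n", ops->set_wol(1));
	return 0;
}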
+
const struct efx_nic_type efx_hunt_a0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = efx_ef10_mem_map_size,
- .probe = efx_ef10_probe,
+ .probe = efx_ef10_probe_pf,
.remove = efx_ef10_remove,
.dimension_resources = efx_ef10_dimension_resources,
.init = efx_ef10_init_nic,
.fini = efx_port_dummy_op_void,
- .map_reset_reason = efx_mcdi_map_reset_reason,
+ .map_reset_reason = efx_ef10_map_reset_reason,
.map_reset_flags = efx_ef10_map_reset_flags,
.reset = efx_ef10_reset,
.probe_port = efx_mcdi_port_probe,
@@ -3623,7 +4339,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.prepare_flr = efx_ef10_prepare_flr,
.finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
- .update_stats = efx_ef10_update_stats,
+ .update_stats = efx_ef10_update_stats_pf,
.start_stats = efx_mcdi_mac_start_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
@@ -3650,7 +4366,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_init = efx_ef10_tx_init,
.tx_remove = efx_ef10_tx_remove,
.tx_write = efx_ef10_tx_write,
- .rx_push_rss_config = efx_ef10_rx_push_rss_config,
+ .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -3689,11 +4405,24 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_ef10_sriov_configure,
.sriov_init = efx_ef10_sriov_init,
.sriov_fini = efx_ef10_sriov_fini,
- .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed,
.sriov_wanted = efx_ef10_sriov_wanted,
.sriov_reset = efx_ef10_sriov_reset,
+ .sriov_flr = efx_ef10_sriov_flr,
+ .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
+ .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
+ .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
+ .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
+ .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
+ .vswitching_probe = efx_ef10_vswitching_probe_pf,
+ .vswitching_restore = efx_ef10_vswitching_restore_pf,
+ .vswitching_remove = efx_ef10_vswitching_remove_pf,
+#endif
+ .get_mac_address = efx_ef10_get_mac_address_pf,
+ .set_mac_address = efx_ef10_set_mac_address,
.revision = EFX_REV_HUNT_A0,
.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
new file mode 100644
index 000000000000..6c9b6e45509a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -0,0 +1,783 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "net_driver.h"
+#include "ef10_sriov.h"
+#include "efx.h"
+#include "nic.h"
+#include "mcdi_pcol.h"
+
+static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
+ unsigned int vf_fn)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id);
+ MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION,
+ EVB_PORT_ASSIGN_IN_PF, nic_data->pf_index,
+ EVB_PORT_ASSIGN_IN_VF, vf_fn);
+
+ return efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
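
MCDI_POPULATE_DWORD_2() above packs the PF index and VF number into one 32-bit MCDI field. A sketch of that packing with an illustrative 16/16 split — the real field offsets and widths are defined in mcdi_pcol.h, not here:

#include <stdio.h>
#include <stdint.h>

/* Illustrative packing only; the field positions are assumptions. */
static uint32_t populate_function_field(uint32_t pf, uint32_t vf)
{
	return (pf & 0xffff) | ((vf & 0xffff) << 16);
}

int main(void)
{
	/* PF 0, VF 3 */
	printf("FUNCTION dword = 0x%08x\n", populate_function_field(0, 3));
	return 0;
}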
+
+static int efx_ef10_vport_add_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_del_mac(struct efx_nic *efx,
+ unsigned int port_id, u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
+ unsigned int vswitch_type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE, vswitch_type);
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 2);
+ MCDI_POPULATE_DWORD_1(inbuf, VSWITCH_ALLOC_IN_FLAGS,
+ VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 0);
+
+ /* Quietly try to allocate 2 VLAN tags */
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ /* If 2 VLAN tags is too many, revert to trying with 1 VLAN tag */
+ if (rc == -EPROTO) {
+ MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 1);
+ rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+ } else if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_VSWITCH_ALLOC,
+ MC_CMD_VSWITCH_ALLOC_IN_LEN,
+ NULL, 0, rc);
+ }
+ return rc;
+}
+
+static int efx_ef10_vswitch_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VSWITCH_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VSWITCH_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_alloc(struct efx_nic *efx,
+ unsigned int port_id_in,
+ unsigned int vport_type,
+ u16 vlan,
+ unsigned int *port_id_out)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ EFX_WARN_ON_PARANOID(!port_id_out);
+
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, port_id_in);
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE, vport_type);
+ MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS,
+ (vlan != EFX_EF10_NO_VLAN));
+ MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_FLAGS,
+ VPORT_ALLOC_IN_FLAG_AUTO_PORT, 0);
+ if (vlan != EFX_EF10_NO_VLAN)
+ MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_VLAN_TAGS,
+ VPORT_ALLOC_IN_VLAN_TAG_0, vlan);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_VPORT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ *port_id_out = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID);
+ return 0;
+}
+
+static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VPORT_FREE_IN_VPORT_ID, port_id);
+
+ return efx_mcdi_rpc(efx, MC_CMD_VPORT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
+
+ MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+ return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int i;
+
+ if (!nic_data->vf)
+ return;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ struct ef10_vf *vf = nic_data->vf + i;
+
+ /* If VF is assigned, do not free the vport */
+ if (vf->pci_dev &&
+ vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ continue;
+
+ if (vf->vport_assigned) {
+ efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i);
+ vf->vport_assigned = 0;
+ }
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+ eth_zero_addr(vf->mac);
+ }
+
+ if (vf->vport_id) {
+ efx_ef10_vport_free(efx, vf->vport_id);
+ vf->vport_id = 0;
+ }
+
+ vf->efx = NULL;
+ }
+}
+
+static void efx_ef10_sriov_free_vf_vswitching(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ efx_ef10_sriov_free_vf_vports(efx);
+ kfree(nic_data->vf);
+ nic_data->vf = NULL;
+}
+
+static int efx_ef10_sriov_assign_vf_vport(struct efx_nic *efx,
+ unsigned int vf_i)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf = nic_data->vf + vf_i;
+ int rc;
+
+ if (WARN_ON_ONCE(!nic_data->vf))
+ return -EOPNOTSUPP;
+
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ vf->vlan, &vf->vport_id);
+ if (rc)
+ return rc;
+
+ rc = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+ if (rc) {
+ eth_zero_addr(vf->mac);
+ return rc;
+ }
+
+ rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc)
+ return rc;
+
+ vf->vport_assigned = 1;
+ return 0;
+}
+
+static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int i;
+ int rc;
+
+ nic_data->vf = kcalloc(efx->vf_count, sizeof(struct ef10_vf),
+ GFP_KERNEL);
+ if (!nic_data->vf)
+ return -ENOMEM;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ random_ether_addr(nic_data->vf[i].mac);
+ nic_data->vf[i].efx = NULL;
+ nic_data->vf[i].vlan = EFX_EF10_NO_VLAN;
+
+ rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ efx_ef10_sriov_free_vf_vports(efx);
+ kfree(nic_data->vf);
+ nic_data->vf = NULL;
+ return rc;
+}
+
+static int efx_ef10_sriov_restore_vf_vswitching(struct efx_nic *efx)
+{
+ unsigned int i;
+ int rc;
+
+ for (i = 0; i < efx->vf_count; i++) {
+ rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+ if (rc)
+ goto fail;
+ }
+
+ return 0;
+fail:
+ efx_ef10_sriov_free_vf_vswitching(efx);
+ return rc;
+}
+
+/* On top of the default firmware vswitch setup, create a VEB vswitch and
+ * expansion vport for use by this function.
+ */
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ int rc;
+
+ if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) {
+ /* vswitch not needed as we have no VFs */
+ efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ return 0;
+ }
+
+ rc = efx_ef10_vswitch_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
+ if (rc)
+ goto fail1;
+
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ EFX_EF10_NO_VLAN, &nic_data->vport_id);
+ if (rc)
+ goto fail2;
+
+ rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
+ if (rc)
+ goto fail3;
+ ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
+
+ rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ if (rc)
+ goto fail4;
+
+ return 0;
+fail4:
+ efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
+ eth_zero_addr(nic_data->vport_mac);
+fail3:
+ efx_ef10_vport_free(efx, nic_data->vport_id);
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+fail2:
+ efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
+fail1:
+ return rc;
+}
+
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+}
+
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (!nic_data->must_probe_vswitching)
+ return 0;
+
+ rc = efx_ef10_vswitching_probe_pf(efx);
+ if (rc)
+ goto fail;
+
+ rc = efx_ef10_sriov_restore_vf_vswitching(efx);
+ if (rc)
+ goto fail;
+
+ nic_data->must_probe_vswitching = false;
+fail:
+ return rc;
+}
+
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (!nic_data->must_probe_vswitching)
+ return 0;
+
+ rc = efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+ if (rc)
+ return rc;
+
+ nic_data->must_probe_vswitching = false;
+ return 0;
+}
+
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ efx_ef10_sriov_free_vf_vswitching(efx);
+
+ efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+
+ if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
+ return; /* No vswitch was ever created */
+
+ if (!is_zero_ether_addr(nic_data->vport_mac)) {
+ efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+ efx->net_dev->dev_addr);
+ eth_zero_addr(nic_data->vport_mac);
+ }
+ efx_ef10_vport_free(efx, nic_data->vport_id);
+ nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
+ /* Only free the vswitch if no VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ efx_ef10_vswitch_free(efx, nic_data->vport_id);
+}
+
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
+{
+ efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+}
+
+static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
+{
+ int rc = 0;
+ struct pci_dev *dev = efx->pci_dev;
+
+ efx->vf_count = num_vfs;
+
+ rc = efx_ef10_sriov_alloc_vf_vswitching(efx);
+ if (rc)
+ goto fail1;
+
+ rc = pci_enable_sriov(dev, num_vfs);
+ if (rc)
+ goto fail2;
+
+ return 0;
+fail2:
+ efx_ef10_sriov_free_vf_vswitching(efx);
+fail1:
+ efx->vf_count = 0;
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to enable SRIOV VFs\n");
+ return rc;
+}
+
+static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
+{
+ struct pci_dev *dev = efx->pci_dev;
+ unsigned int vfs_assigned = 0;
+
+ vfs_assigned = pci_vfs_assigned(dev);
+
+ if (vfs_assigned && !force) {
+ netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
+ "please detach them before disabling SR-IOV\n");
+ return -EBUSY;
+ }
+
+ if (!vfs_assigned)
+ pci_disable_sriov(dev);
+
+ efx_ef10_sriov_free_vf_vswitching(efx);
+ efx->vf_count = 0;
+ return 0;
+}
+
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+ if (num_vfs == 0)
+ return efx_ef10_pci_sriov_disable(efx, false);
+ else
+ return efx_ef10_pci_sriov_enable(efx, num_vfs);
+}
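
For context: this entry point is reached through the PCI core's .sriov_configure driver hook, which fires when the administrator writes a VF count to the device's sriov_numvfs attribute in sysfs. A write of 0 takes the disable path above; any other value takes the enable path, which builds the vswitching state before calling pci_enable_sriov().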
+
+int efx_ef10_sriov_init(struct efx_nic *efx)
+{
+ return 0;
+}
+
+void efx_ef10_sriov_fini(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int i;
+ int rc;
+
+ if (!nic_data->vf) {
+ /* Remove any unassigned, orphaned VFs */
+ if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev))
+ pci_disable_sriov(efx->pci_dev);
+ return;
+ }
+
+ /* Remove any VFs in the host */
+ for (i = 0; i < efx->vf_count; ++i) {
+ struct efx_nic *vf_efx = nic_data->vf[i].efx;
+
+ if (vf_efx)
+ vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
+ }
+
+ rc = efx_ef10_pci_sriov_disable(efx, true);
+ if (rc)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Disabling SRIOV was not successful rc=%d\n", rc);
+ else
+ netif_dbg(efx, drv, efx->net_dev, "SRIOV disabled\n");
+}
+
+static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id,
+ u8 *mac)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+ MCDI_DECLARE_BUF_ERR(outbuf);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+ ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+ return rc;
+}
+
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ int rc;
+
+ if (!nic_data->vf)
+ return -EOPNOTSUPP;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+ vf = nic_data->vf + vf_i;
+
+ if (vf->efx) {
+ efx_device_detach_sync(vf->efx);
+ efx_net_stop(vf->efx->net_dev);
+
+ down_write(&vf->efx->filter_sem);
+ vf->efx->type->filter_table_remove(vf->efx);
+
+ rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc) {
+ up_write(&vf->efx->filter_sem);
+ return rc;
+ }
+ }
+
+ rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+ if (rc)
+ return rc;
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc = efx_ef10_vport_del_vf_mac(efx, vf->vport_id, vf->mac);
+ if (rc)
+ return rc;
+ }
+
+ if (!is_zero_ether_addr(mac)) {
+ rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac);
+ if (rc) {
+ eth_zero_addr(vf->mac);
+ goto fail;
+ }
+ if (vf->efx)
+ ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
+ }
+
+ ether_addr_copy(vf->mac, mac);
+
+ rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc)
+ goto fail;
+
+ if (vf->efx) {
+ /* VF cannot use the vport_id that the PF created */
+ rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc) {
+ up_write(&vf->efx->filter_sem);
+ return rc;
+ }
+ vf->efx->type->filter_table_probe(vf->efx);
+ up_write(&vf->efx->filter_sem);
+ efx_net_open(vf->efx->net_dev);
+ netif_device_attach(vf->efx->net_dev);
+ }
+
+ return 0;
+
+fail:
+ memset(vf->mac, 0, ETH_ALEN);
+ return rc;
+}
+
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
+ u8 qos)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ u16 old_vlan, new_vlan;
+ int rc = 0, rc2 = 0;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+ if (qos != 0)
+ return -EINVAL;
+
+ vf = nic_data->vf + vf_i;
+
+ new_vlan = (vlan == 0) ? EFX_EF10_NO_VLAN : vlan;
+ if (new_vlan == vf->vlan)
+ return 0;
+
+ if (vf->efx) {
+ efx_device_detach_sync(vf->efx);
+ efx_net_stop(vf->efx->net_dev);
+
+ down_write(&vf->efx->filter_sem);
+ vf->efx->type->filter_table_remove(vf->efx);
+
+ rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc)
+ goto restore_filters;
+ }
+
+ if (vf->vport_assigned) {
+ rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+ if (rc) {
+ netif_warn(efx, drv, efx->net_dev,
+ "Failed to change vlan on VF %d.\n", vf_i);
+ netif_warn(efx, drv, efx->net_dev,
+ "This is likely because the VF is bound to a driver in a VM.\n");
+ netif_warn(efx, drv, efx->net_dev,
+ "Please unload the driver in the VM.\n");
+ goto restore_vadaptor;
+ }
+ vf->vport_assigned = 0;
+ }
+
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc = efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+ if (rc)
+ goto restore_evb_port;
+ }
+
+ if (vf->vport_id) {
+ rc = efx_ef10_vport_free(efx, vf->vport_id);
+ if (rc)
+ goto restore_mac;
+ vf->vport_id = 0;
+ }
+
+ /* Do the actual VLAN change */
+ old_vlan = vf->vlan;
+ vf->vlan = new_vlan;
+
+ /* Restore everything in reverse order */
+ rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+ MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+ vf->vlan, &vf->vport_id);
+ if (rc)
+ goto reset_nic;
+
+restore_mac:
+ if (!is_zero_ether_addr(vf->mac)) {
+ rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+ if (rc2) {
+ eth_zero_addr(vf->mac);
+ goto reset_nic;
+ }
+ }
+
+restore_evb_port:
+ rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+ if (rc2)
+ goto reset_nic;
+ else
+ vf->vport_assigned = 1;
+
+restore_vadaptor:
+ if (vf->efx) {
+ rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+ if (rc2)
+ goto reset_nic;
+ }
+
+restore_filters:
+ if (vf->efx) {
+ rc2 = vf->efx->type->filter_table_probe(vf->efx);
+ if (rc2)
+ goto reset_nic;
+
+ up_write(&vf->efx->filter_sem);
+
+ rc2 = efx_net_open(vf->efx->net_dev);
+ if (rc2)
+ goto reset_nic;
+
+ netif_device_attach(vf->efx->net_dev);
+ }
+ return rc;
+
+reset_nic:
+ if (vf->efx) {
+ up_write(&vf->efx->filter_sem);
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore VF - scheduling reset.\n");
+ efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
+ } else {
+ netif_err(efx, drv, efx->net_dev,
+ "Failed to restore the VF and cannot reset the VF "
+ "- VF is not functional.\n");
+ netif_err(efx, drv, efx->net_dev,
+ "Please reload the driver attached to the VF.\n");
+ }
+
+ return rc ? rc : rc2;
+}
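
The restore ladder in efx_ef10_sriov_set_vf_vlan() is worth a second look: the restore_* labels are ordered so each re-creation step falls through into the next, meaning a failure partway through teardown resumes reconstruction from exactly the depth it reached, and the success path reuses the same ladder to rebuild everything. A small runnable sketch of the idiom:

#include <stdio.h>

static int fail_at;		/* which teardown step should fail */

static int step(const char *name, int id)
{
	printf("%s\n", name);
	return id == fail_at ? -1 : 0;
}

int main(void)
{
	fail_at = 2;

	if (step("teardown: free vadaptor", 1))
		goto restore_filters;
	if (step("teardown: unassign port", 2))
		goto restore_vadaptor;

	step("change vlan", 0);
	/* on success, fall through the full ladder to rebuild everything */

restore_vadaptor:
	step("restore: alloc vadaptor", 0);
restore_filters:
	step("restore: probe filters", 0);
	return 0;
}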
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
+ bool spoofchk)
+{
+ return spoofchk ? -EOPNOTSUPP : 0;
+}
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+ int link_state)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_AUTO !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO);
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_ENABLE !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP);
+ BUILD_BUG_ON(IFLA_VF_LINK_STATE_DISABLE !=
+ MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN);
+ MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+ LINK_STATE_MODE_IN_FUNCTION_PF,
+ nic_data->pf_index,
+ LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+ MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE, link_state);
+ return efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL); /* don't care what old mode was */
+}
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivf)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
+
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct ef10_vf *vf;
+ size_t outlen;
+ int rc;
+
+ if (vf_i >= efx->vf_count)
+ return -EINVAL;
+
+ if (!nic_data->vf)
+ return -EOPNOTSUPP;
+
+ vf = nic_data->vf + vf_i;
+
+ ivf->vf = vf_i;
+ ivf->min_tx_rate = 0;
+ ivf->max_tx_rate = 0;
+ ether_addr_copy(ivf->mac, vf->mac);
+ ivf->vlan = (vf->vlan == EFX_EF10_NO_VLAN) ? 0 : vf->vlan;
+ ivf->qos = 0;
+
+ MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+ LINK_STATE_MODE_IN_FUNCTION_PF,
+ nic_data->pf_index,
+ LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+ MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
+ MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
+ rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
+ return -EIO;
+ ivf->linkstate = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
+
+ return 0;
+}
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (!is_valid_ether_addr(nic_data->port_id))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = ETH_ALEN;
+ memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+ return 0;
+}
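
efx_ef10_sriov_get_phys_port_id() fills the generic netdev_phys_item_id contract — an opaque byte string plus a length — from the adapter's port MAC. A userspace sketch of the same fill (the address below is only an example):

#include <stdio.h>
#include <string.h>

#define MAX_PHYS_ITEM_ID_LEN 32	/* matches include/linux/netdevice.h */

struct phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

int main(void)
{
	const unsigned char port_id[6] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };
	struct phys_item_id ppid;
	int i;

	ppid.id_len = sizeof(port_id);
	memcpy(ppid.id, port_id, ppid.id_len);

	for (i = 0; i < ppid.id_len; i++)
		printf("%02x%c", ppid.id[i], i + 1 < ppid.id_len ? ':' : '\n');
	return 0;
}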
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
new file mode 100644
index 000000000000..db4ef537c610
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -0,0 +1,69 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF10_SRIOV_H
+#define EF10_SRIOV_H
+
+#include "net_driver.h"
+
+/**
+ * struct ef10_vf - PF's store of VF data
+ * @efx: efx_nic struct for the current VF
+ * @pci_dev: the pci_dev struct for the VF, retained while the VF is assigned
+ * @vport_id: vport ID for the VF
+ * @vport_assigned: record whether the vport is currently assigned to the VF
+ * @mac: MAC address for the VF, zero when address is removed from the vport
+ * @vlan: Default VLAN for the VF or #EFX_EF10_NO_VLAN
+ */
+struct ef10_vf {
+ struct efx_nic *efx;
+ struct pci_dev *pci_dev;
+ unsigned int vport_id;
+ unsigned int vport_assigned;
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+#define EFX_EF10_NO_VLAN 0
+};
+
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx)
+{
+ return false;
+}
+
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_ef10_sriov_init(struct efx_nic *efx);
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+void efx_ef10_sriov_fini(struct efx_nic *efx);
+static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {}
+
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
+ u16 vlan, u8 qos);
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+ bool spoofchk);
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivf);
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+ int link_state);
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
+
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
+
+#endif /* EF10_SRIOV_H */
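
Note the header idiom above: real prototypes sit next to static inline stubs (efx_ef10_sriov_reset(), efx_ef10_sriov_flr()), so callers never need #ifdef guards and an unsupported operation compiles down to nothing. A minimal illustration:

#include <stdio.h>

static inline int feature_wanted(void) { return 0; }
static inline void feature_reset(void) { }	/* no-op stub */

int main(void)
{
	if (feature_wanted())
		feature_reset();		/* optimised away entirely */
	printf("stub calls cost nothing at the call site\n");
	return 0;
}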
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4b00545a3ace..0c42ed9c9e4c 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -26,6 +26,7 @@
#include "efx.h"
#include "nic.h"
#include "selftest.h"
+#include "sriov.h"
#include "mcdi.h"
#include "workarounds.h"
@@ -76,6 +77,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
[RESET_TYPE_WORLD] = "WORLD",
[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DATAPATH] = "DATAPATH",
[RESET_TYPE_MC_BIST] = "MC_BIST",
[RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
@@ -948,6 +950,16 @@ void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
static void efx_fini_port(struct efx_nic *efx);
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void efx_mac_reconfigure(struct efx_nic *efx)
+{
+ down_read(&efx->filter_sem);
+ efx->type->reconfigure_mac(efx);
+ up_read(&efx->filter_sem);
+}
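
efx_mac_reconfigure() is a thin locking wrapper: reconfigure_mac may sync RX filters, so it must hold filter_sem for read while filter-table teardown holds it for write. A userspace analog with pthread_rwlock_t standing in for the kernel rw_semaphore:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t filter_sem = PTHREAD_RWLOCK_INITIALIZER;

static void reconfigure_mac(void)
{
	pthread_rwlock_rdlock(&filter_sem);	/* readers may run concurrently */
	printf("syncing RX filters under read lock\n");
	pthread_rwlock_unlock(&filter_sem);
}

static void filter_table_remove(void)
{
	pthread_rwlock_wrlock(&filter_sem);	/* excludes all readers */
	printf("freeing filter table under write lock\n");
	pthread_rwlock_unlock(&filter_sem);
}

int main(void)
{
	reconfigure_mac();
	filter_table_remove();
	return 0;
}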
+
/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
* the MAC appropriately. All other PHY configuration changes are pushed
* through phy_op->set_settings(), and pushed asynchronously to the MAC
@@ -1001,7 +1013,7 @@ static void efx_mac_work(struct work_struct *data)
mutex_lock(&efx->mac_lock);
if (efx->port_enabled)
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
}
@@ -1041,11 +1053,11 @@ static int efx_init_port(struct efx_nic *efx)
/* Reconfigure the MAC before creating dma queues (required for
* Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
/* Ensure the PHY advertises the correct flow control settings */
rc = efx->phy_op->reconfigure(efx);
- if (rc)
+ if (rc && rc != -EPERM)
goto fail2;
mutex_unlock(&efx->mac_lock);
@@ -1067,7 +1079,7 @@ static void efx_start_port(struct efx_nic *efx)
efx->port_enabled = true;
/* Ensure MAC ingress/egress is enabled */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
}
@@ -1200,10 +1212,12 @@ static int efx_init_io(struct efx_nic *efx)
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
unsigned int mem_map_size = efx->type->mem_map_size(efx);
- int rc;
+ int rc, bar;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+ bar = efx->type->mem_bar;
+
rc = pci_enable_device(pci_dev);
if (rc) {
netif_err(efx, probe, efx->net_dev,
@@ -1234,8 +1248,8 @@ static int efx_init_io(struct efx_nic *efx)
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
- efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
- rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
+ efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+ rc = pci_request_region(pci_dev, bar, "sfc");
if (rc) {
netif_err(efx, probe, efx->net_dev,
"request for memory BAR failed\n");
@@ -1258,7 +1272,7 @@ static int efx_init_io(struct efx_nic *efx)
return 0;
fail4:
- pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ pci_release_region(efx->pci_dev, bar);
fail3:
efx->membase_phys = 0;
fail2:
@@ -1269,6 +1283,8 @@ static int efx_init_io(struct efx_nic *efx)
static void efx_fini_io(struct efx_nic *efx)
{
+ int bar;
+
netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
if (efx->membase) {
@@ -1277,11 +1293,23 @@ static void efx_fini_io(struct efx_nic *efx)
}
if (efx->membase_phys) {
- pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+ bar = efx->type->mem_bar;
+ pci_release_region(efx->pci_dev, bar);
efx->membase_phys = 0;
}
- pci_disable_device(efx->pci_dev);
+ /* Don't disable bus-mastering if VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ pci_disable_device(efx->pci_dev);
+}
+
+void efx_set_default_rx_indir_table(struct efx_nic *efx)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->rss_spread);
}
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
@@ -1314,15 +1342,19 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
/* If RSS is requested for the PF *and* VFs then we can't write RSS
* table entries that are inaccessible to VFs
*/
- if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
- count > efx_vf_size(efx)) {
- netif_warn(efx, probe, efx->net_dev,
- "Reducing number of RSS channels from %u to %u for "
- "VF support. Increase vf-msix-limit to use more "
- "channels on the PF.\n",
- count, efx_vf_size(efx));
- count = efx_vf_size(efx);
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+ count > efx_vf_size(efx)) {
+ netif_warn(efx, probe, efx->net_dev,
+ "Reducing number of RSS channels from %u to %u for "
+ "VF support. Increase vf-msix-limit to use more "
+ "channels on the PF.\n",
+ count, efx_vf_size(efx));
+ count = efx_vf_size(efx);
+ }
}
+#endif
return count;
}
@@ -1426,10 +1458,15 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
/* RSS might be usable on VFs even if it is disabled on the PF */
-
- efx->rss_spread = ((efx->n_rx_channels > 1 ||
- !efx->type->sriov_wanted(efx)) ?
- efx->n_rx_channels : efx_vf_size(efx));
+#ifdef CONFIG_SFC_SRIOV
+ if (efx->type->sriov_wanted) {
+ efx->rss_spread = ((efx->n_rx_channels > 1 ||
+ !efx->type->sriov_wanted(efx)) ?
+ efx->n_rx_channels : efx_vf_size(efx));
+ return 0;
+ }
+#endif
+ efx->rss_spread = efx->n_rx_channels;
return 0;
}
@@ -1593,7 +1630,6 @@ static void efx_set_channels(struct efx_nic *efx)
static int efx_probe_nic(struct efx_nic *efx)
{
- size_t i;
int rc;
netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
@@ -1616,10 +1652,9 @@ static int efx_probe_nic(struct efx_nic *efx)
goto fail2;
if (efx->n_channels > 1)
- netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
- for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- efx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->rss_spread);
+ netdev_rss_key_fill(&efx->rx_hash_key,
+ sizeof(efx->rx_hash_key));
+ efx_set_default_rx_indir_table(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
@@ -1650,10 +1685,11 @@ static int efx_probe_filters(struct efx_nic *efx)
int rc;
spin_lock_init(&efx->filter_lock);
-
+ init_rwsem(&efx->filter_sem);
+ down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
- return rc;
+ goto out_unlock;
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
@@ -1662,12 +1698,14 @@ static int efx_probe_filters(struct efx_nic *efx)
GFP_KERNEL);
if (!efx->rps_flow_id) {
efx->type->filter_table_remove(efx);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_unlock;
}
}
#endif
-
- return 0;
+out_unlock:
+ up_write(&efx->filter_sem);
+ return rc;
}
static void efx_remove_filters(struct efx_nic *efx)
@@ -1675,12 +1713,16 @@ static void efx_remove_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL
kfree(efx->rps_flow_id);
#endif
+ down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
+ up_write(&efx->filter_sem);
}
static void efx_restore_filters(struct efx_nic *efx)
{
+ down_read(&efx->filter_sem);
efx->type->filter_table_restore(efx);
+ up_read(&efx->filter_sem);
}
/**************************************************************************
@@ -1712,21 +1754,33 @@ static int efx_probe_all(struct efx_nic *efx)
}
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_probe(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to setup vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
rc = efx_probe_filters(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create filter tables\n");
- goto fail3;
+ goto fail4;
}
rc = efx_probe_channels(efx);
if (rc)
- goto fail4;
+ goto fail5;
return 0;
- fail4:
+ fail5:
efx_remove_filters(efx);
+ fail4:
+#ifdef CONFIG_SFC_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
fail3:
efx_remove_port(efx);
fail2:
@@ -1816,6 +1870,9 @@ static void efx_remove_all(struct efx_nic *efx)
{
efx_remove_channels(efx);
efx_remove_filters(efx);
+#ifdef CONFIG_SFC_SRIOV
+ efx->type->vswitching_remove(efx);
+#endif
efx_remove_port(efx);
efx_remove_nic(efx);
}
@@ -2059,7 +2116,7 @@ static int efx_busy_poll(struct napi_struct *napi)
*************************************************************************/
/* Context: process, rtnl_lock() held. */
-static int efx_net_open(struct net_device *net_dev)
+int efx_net_open(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
@@ -2088,7 +2145,7 @@ static int efx_net_open(struct net_device *net_dev)
* Note that the kernel will ignore our return code; this method
* should really be a void.
*/
-static int efx_net_stop(struct net_device *net_dev)
+int efx_net_stop(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2146,7 +2203,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
@@ -2159,6 +2216,8 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
u8 *new_addr = addr->sa_data;
+ u8 old_addr[6];
+ int rc;
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
@@ -2167,12 +2226,20 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
return -EADDRNOTAVAIL;
}
+ /* save old address */
+ ether_addr_copy(old_addr, net_dev->dev_addr);
ether_addr_copy(net_dev->dev_addr, new_addr);
- efx->type->sriov_mac_address_changed(efx);
+ if (efx->type->set_mac_address) {
+ rc = efx->type->set_mac_address(efx);
+ if (rc) {
+ ether_addr_copy(net_dev->dev_addr, old_addr);
+ return rc;
+ }
+ }
/* Reconfigure the MAC */
mutex_lock(&efx->mac_lock);
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
return 0;
@@ -2199,7 +2266,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return 0;
}
-static const struct net_device_ops efx_farch_netdev_ops = {
+static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
@@ -2212,10 +2279,12 @@ static const struct net_device_ops efx_farch_netdev_ops = {
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
#ifdef CONFIG_SFC_SRIOV
- .ndo_set_vf_mac = efx_siena_sriov_set_vf_mac,
- .ndo_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
- .ndo_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
- .ndo_get_vf_config = efx_siena_sriov_get_vf_config,
+ .ndo_set_vf_mac = efx_sriov_set_vf_mac,
+ .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
+ .ndo_get_vf_config = efx_sriov_get_vf_config,
+ .ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
+ .ndo_get_phys_port_id = efx_sriov_get_phys_port_id,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
@@ -2229,29 +2298,6 @@ static const struct net_device_ops efx_farch_netdev_ops = {
#endif
};
-static const struct net_device_ops efx_ef10_netdev_ops = {
- .ndo_open = efx_net_open,
- .ndo_stop = efx_net_stop,
- .ndo_get_stats64 = efx_net_stats,
- .ndo_tx_timeout = efx_watchdog,
- .ndo_start_xmit = efx_hard_start_xmit,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = efx_ioctl,
- .ndo_change_mtu = efx_change_mtu,
- .ndo_set_mac_address = efx_set_mac_address,
- .ndo_set_rx_mode = efx_set_rx_mode,
- .ndo_set_features = efx_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = efx_netpoll,
-#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = efx_busy_poll,
-#endif
-#ifdef CONFIG_RFS_ACCEL
- .ndo_rx_flow_steer = efx_filter_rfs,
-#endif
-};
-
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
@@ -2264,8 +2310,7 @@ static int efx_netdev_event(struct notifier_block *this,
{
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
- net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
+ if ((net_dev->netdev_ops == &efx_netdev_ops) &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
@@ -2284,6 +2329,28 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool enable = count > 0 && *buf != '0';
+
+ mcdi->logging_enabled = enable;
+ return count;
+}
+static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
+#endif
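
Usage note for the attribute pair above: after device_create_file() runs, the knob appears as mcdi_logging in the PCI device's sysfs directory. Reads report 0 or 1, and per set_mcdi_log() any write whose first character is not '0' enables logging.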
+
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
@@ -2292,12 +2359,9 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
- if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- net_dev->netdev_ops = &efx_ef10_netdev_ops;
+ net_dev->netdev_ops = &efx_netdev_ops;
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
- } else {
- net_dev->netdev_ops = &efx_farch_netdev_ops;
- }
net_dev->ethtool_ops = &efx_ethtool_ops;
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
@@ -2344,9 +2408,21 @@ static int efx_register_netdev(struct efx_nic *efx)
"failed to init net dev attributes\n");
goto fail_registered;
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ goto fail_attr_mcdi_logging;
+ }
+#endif
return 0;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+fail_attr_mcdi_logging:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+#endif
fail_registered:
rtnl_lock();
efx_dissociate(efx);
@@ -2365,13 +2441,14 @@ static void efx_unregister_netdev(struct efx_nic *efx)
BUG_ON(netdev_priv(efx->net_dev) != efx);
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
- device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-
- rtnl_lock();
- unregister_netdevice(efx->net_dev);
- efx->state = STATE_UNINIT;
- rtnl_unlock();
+ if (efx_dev_registered(efx)) {
+ strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+#endif
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ unregister_netdev(efx->net_dev);
+ }
}
/**************************************************************************
@@ -2393,7 +2470,8 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH)
efx->phy_op->fini(efx);
efx->type->fini(efx);
}
@@ -2422,11 +2500,13 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (!ok)
goto fail;
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
+ if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+ method != RESET_TYPE_DATAPATH) {
rc = efx->phy_op->init(efx);
if (rc)
goto fail;
- if (efx->phy_op->reconfigure(efx))
+ rc = efx->phy_op->reconfigure(efx);
+ if (rc && rc != -EPERM)
netif_err(efx, drv, efx->net_dev,
"could not restore PHY settings\n");
}
@@ -2434,8 +2514,20 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
rc = efx_enable_interrupts(efx);
if (rc)
goto fail;
+
+#ifdef CONFIG_SFC_SRIOV
+ rc = efx->type->vswitching_restore(efx);
+ if (rc) /* not fatal; the PF will still work fine */
+ netif_warn(efx, probe, efx->net_dev,
+ "failed to restore vswitching rc=%d;"
+ " VFs may not function\n", rc);
+#endif
+
+ down_read(&efx->filter_sem);
efx_restore_filters(efx);
- efx->type->sriov_reset(efx);
+ up_read(&efx->filter_sem);
+ if (efx->type->sriov_reset)
+ efx->type->sriov_reset(efx);
mutex_unlock(&efx->mac_lock);
@@ -2605,6 +2697,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_DATAPATH:
case RESET_TYPE_MC_BIST:
case RESET_TYPE_MCDI_TIMEOUT:
method = type;
@@ -2655,6 +2748,8 @@ static const struct pci_device_id efx_pci_table[] = {
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */
+ .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{0} /* end of list */
@@ -2809,7 +2904,8 @@ static void efx_pci_remove_main(struct efx_nic *efx)
}
/* Final NIC shutdown
- * This is called only at module unload (or hotplug removal).
+ * This is called only at module unload (or hotplug removal). A PF can call
+ * this on its VFs to ensure they are unbound first.
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
@@ -2826,7 +2922,9 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_disable_interrupts(efx);
rtnl_unlock();
- efx->type->sriov_fini(efx);
+ if (efx->type->sriov_fini)
+ efx->type->sriov_fini(efx);
+
efx_unregister_netdev(efx);
efx_mtd_remove(efx);
@@ -3008,7 +3106,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
- efx_probe_vpd_strings(efx);
+ if (!efx->type->is_vf)
+ efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -3023,10 +3122,12 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail4;
- rc = efx->type->sriov_init(efx);
- if (rc)
- netif_err(efx, probe, efx->net_dev,
- "SR-IOV can't be enabled rc %d\n", rc);
+ if (efx->type->sriov_init) {
+ rc = efx->type->sriov_init(efx);
+ if (rc)
+ netif_err(efx, probe, efx->net_dev,
+ "SR-IOV can't be enabled rc %d\n", rc);
+ }
netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
@@ -3058,6 +3159,26 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return rc;
}
+/* efx_pci_sriov_configure returns the actual number of Virtual Functions
+ * enabled on success
+ */
+#ifdef CONFIG_SFC_SRIOV
+static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ int rc;
+ struct efx_nic *efx = pci_get_drvdata(dev);
+
+ if (efx->type->sriov_configure) {
+ rc = efx->type->sriov_configure(efx, num_vfs);
+ if (rc)
+ return rc;
+ else
+ return num_vfs;
+ } else
+ return -EOPNOTSUPP;
+}
+#endif
+
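/* Illustrative usage sketch (not part of the patch): the .sriov_configure
 * hook above is invoked by the PCI core when userspace writes the standard
 * SR-IOV sysfs attribute. Assuming a device at 0000:01:00.0:
 *
 *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * The first write requests four VFs (the callback returns the number
 * actually enabled); the second write disables them again.
 */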
static int efx_pm_freeze(struct device *dev)
{
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
@@ -3280,6 +3401,9 @@ static struct pci_driver efx_pci_driver = {
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
.err_handler = &efx_err_handlers,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_pci_sriov_configure,
+#endif
};
/**************************************************************************
@@ -3302,9 +3426,11 @@ static int __init efx_init_module(void)
if (rc)
goto err_notifier;
+#ifdef CONFIG_SFC_SRIOV
rc = efx_init_sriov();
if (rc)
goto err_sriov;
+#endif
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
@@ -3321,8 +3447,10 @@ static int __init efx_init_module(void)
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
+#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
err_sriov:
+#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
@@ -3334,7 +3462,9 @@ static void __exit efx_exit_module(void)
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
+#ifdef CONFIG_SFC_SRIOV
efx_fini_sriov();
+#endif
unregister_netdevice_notifier(&efx_netdev_notifier);
}
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 2587c582a821..acb1e0718485 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -15,7 +15,12 @@
#include "filter.h"
/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All VFs use BAR 0/1 for memory */
#define EFX_MEM_BAR 2
+#define EFX_MEM_VF_BAR 0
+
+int efx_net_open(struct net_device *net_dev);
+int efx_net_stop(struct net_device *net_dev);
/* TX */
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
@@ -32,6 +37,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
/* RX */
+void efx_set_default_rx_indir_table(struct efx_nic *efx);
void efx_rx_config_page_split(struct efx_nic *efx);
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
@@ -71,6 +77,8 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
/* Filters */
+void efx_mac_reconfigure(struct efx_nic *efx);
+
/**
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
@@ -220,6 +228,13 @@ static inline void efx_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {}
#endif
+#ifdef CONFIG_SFC_SRIOV
+static inline unsigned int efx_vf_size(struct efx_nic *efx)
+{
+ return 1 << efx->vi_scale;
+}
+#endif
+
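/* Illustrative note (not part of the patch): vi_scale is the log2 of the
 * number of VIs per VF, so with efx->vi_scale == 6 this evaluates to 64.
 */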
static inline void efx_schedule_channel(struct efx_channel *channel)
{
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index d1dbb5fb31bb..c94f56271dd4 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -143,6 +143,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_WORLD: Reset as much as possible
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
* unsuccessful.
+ * @RESET_TYPE_DATAPATH: Reset datapath only.
* @RESET_TYPE_MC_BIST: MC entering BIST mode.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
@@ -159,6 +160,7 @@ enum reset_type {
RESET_TYPE_ALL,
RESET_TYPE_WORLD,
RESET_TYPE_RECOVER_OR_DISABLE,
+ RESET_TYPE_DATAPATH,
RESET_TYPE_MC_BIST,
RESET_TYPE_DISABLE,
RESET_TYPE_MAX_METHOD,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 4835bc0d0de8..034797661f96 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -734,7 +734,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
/* Reconfigure the MAC. The PHY *may* generate a link state change event
* if the user just changed the advertised capabilities, but there's no
* harm doing this twice */
- efx->type->reconfigure_mac(efx);
+ efx_mac_reconfigure(efx);
out:
mutex_unlock(&efx->mac_lock);
@@ -1109,9 +1109,8 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
return -EOPNOTSUPP;
if (!indir)
return 0;
- memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
- efx->type->rx_push_rss_config(efx);
- return 0;
+
+ return efx->type->rx_push_rss_config(efx, true, indir);
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index f166c8ef38a3..80e69af21642 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -477,16 +477,29 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
*
**************************************************************************
*/
+static int dummy_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
+{
+ (void) efx;
+ (void) user;
+ (void) rx_indir_table;
+ return -ENOSYS;
+}
-static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+static int falcon_b0_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
{
efx_oword_t temp;
+ (void) user;
/* Set hash key for IPv4 */
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
efx_farch_rx_push_indir_table(efx);
+ return 0;
}
/**************************************************************************
@@ -2507,7 +2520,7 @@ static int falcon_init_nic(struct efx_nic *efx)
falcon_init_rx_cfg(efx);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- falcon_b0_rx_push_rss_config(efx);
+ falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);
/* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2687,6 +2700,8 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
*/
const struct efx_nic_type falcon_a1_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = falcon_a1_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
@@ -2729,7 +2744,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
- .rx_push_rss_config = efx_port_dummy_op_void,
+ .rx_push_rss_config = dummy_rx_push_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -2766,11 +2781,6 @@ const struct efx_nic_type falcon_a1_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
- .sriov_init = efx_falcon_sriov_init,
- .sriov_fini = efx_falcon_sriov_fini,
- .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
- .sriov_wanted = efx_falcon_sriov_wanted,
- .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_A1,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
@@ -2788,6 +2798,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
};
const struct efx_nic_type falcon_b0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = falcon_b0_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
@@ -2867,11 +2879,6 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mtd_write = falcon_mtd_write,
.mtd_sync = falcon_mtd_sync,
#endif
- .sriov_init = efx_falcon_sriov_init,
- .sriov_fini = efx_falcon_sriov_fini,
- .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
- .sriov_wanted = efx_falcon_sriov_wanted,
- .sriov_reset = efx_falcon_sriov_reset,
.revision = EFX_REV_FALCON_B0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index bb89e96a125e..f08266f0eca2 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -20,6 +20,8 @@
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
+#include "sriov.h"
+#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"
@@ -1198,13 +1200,17 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_tx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_tx_flush_done(efx, event);
+#endif
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_rx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_rx_flush_done(efx, event);
+#endif
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev,
@@ -1242,8 +1248,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
" RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
- } else
+ }
+#ifdef CONFIG_SFC_SRIOV
+ else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
break;
case FSE_BZ_TX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
@@ -1252,8 +1261,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
" TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
- } else
+ }
+#ifdef CONFIG_SFC_SRIOV
+ else
efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
break;
default:
netif_vdbg(efx, hw, efx->net_dev,
@@ -1317,9 +1329,11 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_farch_handle_driver_event(channel, &event);
break;
+#ifdef CONFIG_SFC_SRIOV
case FSE_CZ_EV_CODE_USER_EV:
efx_siena_sriov_event(channel, &event);
break;
+#endif
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
break;
@@ -1685,28 +1699,32 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
- if (efx->type->sriov_wanted(efx)) {
- unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
-
- nic_data->vf_buftbl_base = buftbl_min;
-
- vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
- vi_count = max(vi_count, EFX_VI_BASE);
- buftbl_free = (sram_lim_qw - buftbl_min -
- vi_count * vi_dc_entries);
-
- entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
- efx_vf_size(efx));
- vf_limit = min(buftbl_free / entries_per_vf,
- (1024U - EFX_VI_BASE) >> efx->vi_scale);
-
- if (efx->vf_count > vf_limit) {
- netif_err(efx, probe, efx->net_dev,
- "Reducing VF count from from %d to %d\n",
- efx->vf_count, vf_limit);
- efx->vf_count = vf_limit;
+ if (efx->type->sriov_wanted) {
+ if (efx->type->sriov_wanted(efx)) {
+ unsigned vi_dc_entries, buftbl_free;
+ unsigned entries_per_vf, vf_limit;
+
+ nic_data->vf_buftbl_base = buftbl_min;
+
+ vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+ vi_count = max(vi_count, EFX_VI_BASE);
+ buftbl_free = (sram_lim_qw - buftbl_min -
+ vi_count * vi_dc_entries);
+
+ entries_per_vf = ((vi_dc_entries +
+ EFX_VF_BUFTBL_PER_VI) *
+ efx_vf_size(efx));
+ vf_limit = min(buftbl_free / entries_per_vf,
+ (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+ if (efx->vf_count > vf_limit) {
+ netif_err(efx, probe, efx->net_dev,
+ "Reducing VF count from from %d to %d\n",
+ efx->vf_count, vf_limit);
+ efx->vf_count = vf_limit;
+ }
+ vi_count += efx->vf_count * efx_vf_size(efx);
}
- vi_count += efx->vf_count * efx_vf_size(efx);
}
#endif
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d37928f01949..81640f8bb811 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -8,6 +8,7 @@
*/
#include <linux/delay.h>
+#include <linux/moduleparam.h>
#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
@@ -54,18 +55,32 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
static bool efx_mcdi_poll_once(struct efx_nic *efx);
static void efx_mcdi_abandon(struct efx_nic *efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static bool mcdi_logging_default;
+module_param(mcdi_logging_default, bool, 0644);
+MODULE_PARM_DESC(mcdi_logging_default,
+ "Enable MCDI logging on newly-probed functions");
+#endif
+
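/* Illustrative usage sketch (not part of the patch; the PCI address is a
 * placeholder): MCDI tracing can be enabled for all newly-probed functions
 * via the module parameter above, or per device at runtime through the
 * mcdi_logging attribute added earlier in this patch:
 *
 *   modprobe sfc mcdi_logging_default=1
 *   echo 1 > /sys/bus/pci/devices/0000:01:00.0/mcdi_logging
 */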
int efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
bool already_attached;
- int rc;
+ int rc = -ENOMEM;
efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
if (!efx->mcdi)
- return -ENOMEM;
+ goto fail;
mcdi = efx_mcdi(efx);
mcdi->efx = efx;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ /* consuming code assumes buffer is page-sized */
+ mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!mcdi->logging_buffer)
+ goto fail1;
+ mcdi->logging_enabled = mcdi_logging_default;
+#endif
init_waitqueue_head(&mcdi->wq);
spin_lock_init(&mcdi->iface_lock);
mcdi->state = MCDI_STATE_QUIESCENT;
@@ -81,7 +96,7 @@ int efx_mcdi_init(struct efx_nic *efx)
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
- return rc;
+ goto fail2;
/* Let the MC (and BMC, if this is a LOM) know that the driver
* is loaded. We should do this before we reset the NIC.
@@ -90,7 +105,7 @@ int efx_mcdi_init(struct efx_nic *efx)
if (rc) {
netif_err(efx, probe, efx->net_dev,
"Unable to register driver with MCPU\n");
- return rc;
+ goto fail2;
}
if (already_attached)
/* Not a fatal error */
@@ -102,6 +117,15 @@ int efx_mcdi_init(struct efx_nic *efx)
efx->primary = efx;
return 0;
+fail2:
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ free_page((unsigned long)mcdi->logging_buffer);
+fail1:
+#endif
+ kfree(efx->mcdi);
+ efx->mcdi = NULL;
+fail:
+ return rc;
}
void efx_mcdi_fini(struct efx_nic *efx)
@@ -114,6 +138,10 @@ void efx_mcdi_fini(struct efx_nic *efx)
/* Relinquish the device (back to the BMC, if this is a LOM) */
efx_mcdi_drv_attach(efx, false, NULL);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ free_page((unsigned long)efx->mcdi->iface.logging_buffer);
+#endif
+
kfree(efx->mcdi);
}
@@ -121,6 +149,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
efx_dword_t hdr[2];
size_t hdr_len;
u32 xflags, seqno;
@@ -165,6 +196,31 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
hdr_len = 8;
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ int bytes = 0;
+ int i;
+ /* Lengths should always be a whole number of dwords, so scream
+ * if they're not.
+ */
+ WARN_ON_ONCE(hdr_len % 4);
+ WARN_ON_ONCE(inlen % 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr[i].u32[0]));
+
+ for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(inbuf[i].u32[0]));
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
+ }
+#endif
+
efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
mcdi->new_epoch = false;
@@ -206,6 +262,9 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int respseq, respcmd, error;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
efx_dword_t hdr;
efx->type->mcdi_read_response(efx, &hdr, 0, 4);
@@ -223,6 +282,39 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ size_t hdr_len, data_len;
+ int bytes = 0;
+ int i;
+
+ WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
+ hdr_len = mcdi->resp_hdr_len / 4;
+ /* MCDI_DECLARE_BUF ensures that the underlying buffer is padded
+ * to dword size, so the response is safe to read a whole dword at a time
+ */
+ data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr,
+ mcdi->resp_hdr_len + (i * 4), 4);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
+ }
+#endif
+
if (error && mcdi->resp_data_len == 0) {
netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
mcdi->resprc = -EIO;
@@ -406,7 +498,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
struct efx_mcdi_async_param *async;
size_t hdr_len, data_len, err_len;
efx_dword_t *outbuf;
- MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ MCDI_DECLARE_BUF_ERR(errbuf);
int rc;
if (cmpxchg(&mcdi->state,
@@ -534,7 +626,7 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
size_t *outlen_actual, bool quiet)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+ MCDI_DECLARE_BUF_ERR(errbuf);
int rc;
if (mcdi->mode == MCDI_MODE_POLL)
@@ -1035,7 +1127,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
/* MAC stats are gather lazily. We can ignore this. */
break;
case MCDI_EVENT_CODE_FLR:
- efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
+ if (efx->type->sriov_flr)
+ efx->type->sriov_flr(efx,
+ MCDI_EVENT_FIELD(*event, FLR_VF));
break;
case MCDI_EVENT_CODE_PTP_RX:
case MCDI_EVENT_CODE_PTP_FAULT:
@@ -1081,9 +1175,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
- MCDI_DECLARE_BUF(outbuf,
- max(MC_CMD_GET_VERSION_OUT_LEN,
- MC_CMD_GET_CAPABILITIES_OUT_LEN));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
size_t outlength;
const __le16 *ver_words;
size_t offset;
@@ -1108,19 +1200,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
* single version. Report which variants are running.
*/
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
- BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
- outbuf, sizeof(outbuf), &outlength);
- if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
- offset += snprintf(
- buf + offset, len - offset, " rx? tx?");
- else
- offset += snprintf(
- buf + offset, len - offset, " rx%x tx%x",
- MCDI_WORD(outbuf,
- GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
- MCDI_WORD(outbuf,
- GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ offset += snprintf(buf + offset, len - offset, " rx%x tx%x",
+ nic_data->rx_dpcpu_fw_id,
+ nic_data->tx_dpcpu_fw_id);
/* It's theoretically possible for the string to exceed 31
* characters, though in practice the first three version
@@ -1150,10 +1234,26 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
- rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
+ * specified will fail with EPERM, and we have to tell the MC we don't
+ * care what firmware we get.
+ */
+ if (rc == -EPERM) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
+ MC_CMD_FW_DONT_CARE);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ }
+ if (rc) {
+ efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
+ outbuf, outlen, rc);
goto fail;
+ }
if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
rc = -EIO;
goto fail;
@@ -1178,16 +1278,6 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
* and are completely trusted by firmware. Abort probing
* if that's not true for this function.
*/
- if (driver_operating &&
- (efx->mcdi->fn_flags &
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
- 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
- (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
- 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
- netif_err(efx, probe, efx->net_dev,
- "This driver version only supports one function per port\n");
- return -ENODEV;
- }
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -1385,10 +1475,13 @@ fail1:
return rc;
}
+/* Returns 1 if an assertion was read, 0 if no assertion had fired,
+ * negative on error.
+ */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
unsigned int flags, index;
const char *reason;
size_t outlen;
@@ -1406,6 +1499,8 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
outbuf, sizeof(outbuf), &outlen);
+ if (rc == -EPERM)
+ return 0;
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
if (rc) {
@@ -1443,24 +1538,31 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
index));
- return 0;
+ return 1;
}
-static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+static int efx_mcdi_exit_assertion(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+ int rc;
/* If the MC is running debug firmware, it might now be
* waiting for a debugger to attach, but we just want it to
* reboot. We set a flag that makes the command a no-op if it
- * has already done so. We don't know what return code to
- * expect (0 or -EIO), so ignore it.
+ * has already done so.
+ * The MCDI will thus return either 0 or -EIO.
*/
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
- (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
+ if (rc == -EIO)
+ rc = 0;
+ if (rc)
+ efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, rc);
+ return rc;
}
int efx_mcdi_handle_assertion(struct efx_nic *efx)
@@ -1468,12 +1570,10 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
int rc;
rc = efx_mcdi_read_assertion(efx);
- if (rc)
+ if (rc <= 0)
return rc;
- efx_mcdi_exit_assertion(efx);
-
- return 0;
+ return efx_mcdi_exit_assertion(efx);
}
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
@@ -1550,7 +1650,9 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
if (rc)
return rc;
- if (method == RESET_TYPE_WORLD)
+ if (method == RESET_TYPE_DATAPATH)
+ return 0;
+ else if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
else
return efx_mcdi_reset_func(efx);
@@ -1688,6 +1790,36 @@ int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
NULL, 0, NULL);
}
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+ unsigned int *enabled_out)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ if (impl_out)
+ *impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+
+ if (enabled_out)
+ *enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
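/* Illustrative caller sketch (not part of the patch; the helper name is
 * hypothetical). A caller would typically test one of the bug bits defined
 * for MC_CMD_GET_WORKAROUNDS_OUT in both masks, e.g. bug 35388:
 */
static bool efx_workaround_35388_enabled(struct efx_nic *efx)
{
	unsigned int implemented, enabled;

	/* Conservatively report the workaround as absent on MCDI failure */
	if (efx_mcdi_get_workarounds(efx, &implemented, &enabled))
		return false;

	return (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) != 0;
}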
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 56465f7465a2..1838afe2da92 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -58,6 +58,8 @@ enum efx_mcdi_mode {
* enabled
* @async_list: Queue of asynchronous requests
* @async_timer: Timer for asynchronous request timeout
+ * @logging_buffer: buffer that may be used to build MCDI tracing messages
+ * @logging_enabled: whether to trace MCDI
*/
struct efx_mcdi_iface {
struct efx_nic *efx;
@@ -74,6 +76,10 @@ struct efx_mcdi_iface {
spinlock_t async_lock;
struct list_head async_list;
struct timer_list async_timer;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *logging_buffer;
+ bool logging_enabled;
+#endif
};
struct efx_mcdi_mon {
@@ -176,10 +182,12 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
* 32-bit-aligned. Also, on Siena we must copy to the MC shared
* memory strictly 32 bits at a time, so add any necessary padding.
*/
-#define MCDI_DECLARE_BUF(_name, _len) \
+#define _MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
-#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len) \
- MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
+#define MCDI_DECLARE_BUF(_name, _len) \
+ _MCDI_DECLARE_BUF(_name, _len) = {{{0}}}
+#define MCDI_DECLARE_BUF_ERR(_name) \
+ MCDI_DECLARE_BUF(_name, 8)
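/* Illustrative usage sketch (not part of the patch): with the definitions
 * above, request buffers are zero-initialised at declaration and error-only
 * responses get a fixed 8-byte buffer:
 *
 *   MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
 *   MCDI_DECLARE_BUF_ERR(errbuf);
 */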
#define _MCDI_PTR(_buf, _offset) \
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
@@ -339,6 +347,8 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+ unsigned int *enabled_out);
#ifdef CONFIG_SFC_MCDI_MON
int efx_mcdi_mon_probe(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index e028de10e1b7..45fca9fc66b7 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -638,6 +638,8 @@
*/
#define MC_CMD_READ32 0x1
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ32_IN msgrequest */
#define MC_CMD_READ32_IN_LEN 8
#define MC_CMD_READ32_IN_ADDR_OFST 0
@@ -659,6 +661,8 @@
*/
#define MC_CMD_WRITE32 0x2
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_WRITE32_IN msgrequest */
#define MC_CMD_WRITE32_IN_LENMIN 8
#define MC_CMD_WRITE32_IN_LENMAX 252
@@ -679,6 +683,8 @@
*/
#define MC_CMD_COPYCODE 0x3
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
/* Source address */
@@ -717,6 +723,8 @@
*/
#define MC_CMD_SET_FUNC 0x4
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
/* Set function */
@@ -732,6 +740,8 @@
*/
#define MC_CMD_GET_BOOT_STATUS 0x5
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
@@ -758,6 +768,8 @@
*/
#define MC_CMD_GET_ASSERTS 0x6
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_ASSERTS_IN msgrequest */
#define MC_CMD_GET_ASSERTS_IN_LEN 4
/* Set to clear assertion */
@@ -794,6 +806,8 @@
*/
#define MC_CMD_LOG_CTRL 0x7
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LOG_CTRL_IN msgrequest */
#define MC_CMD_LOG_CTRL_IN_LEN 8
/* Log destination */
@@ -814,6 +828,8 @@
*/
#define MC_CMD_GET_VERSION 0x8
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VERSION_IN msgrequest */
#define MC_CMD_GET_VERSION_IN_LEN 0
@@ -870,6 +886,8 @@
*/
#define MC_CMD_PTP 0xb
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_PTP_IN msgrequest */
#define MC_CMD_PTP_IN_LEN 1
/* PTP operation code */
@@ -1404,6 +1422,8 @@
*/
#define MC_CMD_CSR_READ32 0xc
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
/* Address */
@@ -1428,6 +1448,8 @@
*/
#define MC_CMD_CSR_WRITE32 0xd
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CSR_WRITE32_IN msgrequest */
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
@@ -1452,6 +1474,8 @@
*/
#define MC_CMD_HP 0x54
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_HP_IN msgrequest */
#define MC_CMD_HP_IN_LEN 16
/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
@@ -1493,6 +1517,8 @@
*/
#define MC_CMD_STACKINFO 0xf
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_STACKINFO_IN msgrequest */
#define MC_CMD_STACKINFO_IN_LEN 0
@@ -1513,6 +1539,8 @@
*/
#define MC_CMD_MDIO_READ 0x10
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MDIO_READ_IN msgrequest */
#define MC_CMD_MDIO_READ_IN_LEN 16
/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
@@ -1552,6 +1580,8 @@
*/
#define MC_CMD_MDIO_WRITE 0x11
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_MDIO_WRITE_IN msgrequest */
#define MC_CMD_MDIO_WRITE_IN_LEN 20
/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
@@ -1591,6 +1621,8 @@
*/
#define MC_CMD_DBI_WRITE 0x12
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DBI_WRITE_IN msgrequest */
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
#define MC_CMD_DBI_WRITE_IN_LENMAX 252
@@ -1739,6 +1771,8 @@
*/
#define MC_CMD_GET_BOARD_CFG 0x18
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
@@ -1778,6 +1812,8 @@
*/
#define MC_CMD_DBI_READX 0x19
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DBI_READX_IN msgrequest */
#define MC_CMD_DBI_READX_IN_LENMIN 8
#define MC_CMD_DBI_READX_IN_LENMAX 248
@@ -1822,6 +1858,8 @@
*/
#define MC_CMD_SET_RAND_SEED 0x1a
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
/* Seed value. */
@@ -1863,6 +1901,8 @@
*/
#define MC_CMD_DRV_ATTACH 0x1c
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DRV_ATTACH_IN msgrequest */
#define MC_CMD_DRV_ATTACH_IN_LEN 12
/* new state (0=detached, 1=attached) to set if UPDATE=1 */
@@ -1875,6 +1915,8 @@
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Only this option is allowed for non-admin functions */
+#define MC_CMD_FW_DONT_CARE 0xffffffff
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
@@ -1920,6 +1962,8 @@
*/
#define MC_CMD_PORT_RESET 0x20
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_PORT_RESET_IN msgrequest */
#define MC_CMD_PORT_RESET_IN_LEN 0
@@ -1934,6 +1978,7 @@
* extended version of the deprecated MC_CMD_PORT_RESET with added fields.
*/
#define MC_CMD_ENTITY_RESET 0x20
+/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
/* MC_CMD_ENTITY_RESET_IN msgrequest */
#define MC_CMD_ENTITY_RESET_IN_LEN 4
@@ -2023,6 +2068,8 @@
*/
#define MC_CMD_PUTS 0x23
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PUTS_IN msgrequest */
#define MC_CMD_PUTS_IN_LENMIN 13
#define MC_CMD_PUTS_IN_LENMAX 252
@@ -2050,6 +2097,8 @@
*/
#define MC_CMD_GET_PHY_CFG 0x24
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PHY_CFG_IN msgrequest */
#define MC_CMD_GET_PHY_CFG_IN_LEN 0
@@ -2149,6 +2198,8 @@
*/
#define MC_CMD_START_BIST 0x25
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
/* Type of test. */
@@ -2185,6 +2236,8 @@
*/
#define MC_CMD_POLL_BIST 0x26
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -2344,6 +2397,8 @@
*/
#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
@@ -2463,6 +2518,8 @@
*/
#define MC_CMD_GET_LINK 0x29
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LINK_IN msgrequest */
#define MC_CMD_GET_LINK_IN_LEN 0
@@ -2519,6 +2576,8 @@
*/
#define MC_CMD_SET_LINK 0x2a
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_LINK_IN msgrequest */
#define MC_CMD_SET_LINK_IN_LEN 16
/* ??? */
@@ -2550,6 +2609,8 @@
*/
#define MC_CMD_SET_ID_LED 0x2b
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_ID_LED_IN msgrequest */
#define MC_CMD_SET_ID_LED_IN_LEN 4
/* Set LED state. */
@@ -2568,6 +2629,8 @@
*/
#define MC_CMD_SET_MAC 0x2c
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_SET_MAC_IN msgrequest */
#define MC_CMD_SET_MAC_IN_LEN 24
/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
@@ -2609,6 +2672,8 @@
*/
#define MC_CMD_PHY_STATS 0x2d
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_PHY_STATS_IN msgrequest */
#define MC_CMD_PHY_STATS_IN_LEN 8
/* ??? */
@@ -2687,8 +2752,10 @@
*/
#define MC_CMD_MAC_STATS 0x2e
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MAC_STATS_IN msgrequest */
-#define MC_CMD_MAC_STATS_IN_LEN 16
+#define MC_CMD_MAC_STATS_IN_LEN 20
/* ??? */
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
@@ -2710,6 +2777,8 @@
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -2824,11 +2893,31 @@
/* enum: RXDP counter: Number of times an emergency descriptor fetch was
* performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
/* enum: RXDP counter: Number of times the DPCPU waited for an existing
* descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
/* enum: Start of GMAC stats buffer space, for Siena only. */
#define MC_CMD_GMAC_DMABUF_START 0x40
/* enum: End of GMAC stats buffer space, for Siena only. */
@@ -2926,6 +3015,8 @@
*/
#define MC_CMD_WOL_FILTER_SET 0x32
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
@@ -3020,6 +3111,8 @@
*/
#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
@@ -3035,6 +3128,8 @@
*/
#define MC_CMD_WOL_FILTER_RESET 0x34
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
@@ -3069,6 +3164,8 @@
*/
#define MC_CMD_NVRAM_TYPES 0x36
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_TYPES_IN msgrequest */
#define MC_CMD_NVRAM_TYPES_IN_LEN 0
@@ -3125,6 +3222,8 @@
*/
#define MC_CMD_NVRAM_INFO 0x37
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_INFO_IN msgrequest */
#define MC_CMD_NVRAM_INFO_IN_LEN 4
#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
@@ -3157,6 +3256,8 @@
*/
#define MC_CMD_NVRAM_UPDATE_START 0x38
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
@@ -3175,6 +3276,8 @@
*/
#define MC_CMD_NVRAM_READ 0x39
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_READ_IN msgrequest */
#define MC_CMD_NVRAM_READ_IN_LEN 12
#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
@@ -3202,6 +3305,8 @@
*/
#define MC_CMD_NVRAM_WRITE 0x3a
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_WRITE_IN msgrequest */
#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
@@ -3228,6 +3333,8 @@
*/
#define MC_CMD_NVRAM_ERASE 0x3b
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_ERASE_IN msgrequest */
#define MC_CMD_NVRAM_ERASE_IN_LEN 12
#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
@@ -3248,6 +3355,8 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
@@ -3279,6 +3388,8 @@
*/
#define MC_CMD_REBOOT 0x3d
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
@@ -3316,6 +3427,8 @@
*/
#define MC_CMD_REBOOT_MODE 0x3f
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
@@ -3368,6 +3481,8 @@
*/
#define MC_CMD_SENSOR_INFO 0x41
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
@@ -3542,6 +3657,8 @@
*/
#define MC_CMD_READ_SENSORS 0x42
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
@@ -3602,6 +3719,8 @@
*/
#define MC_CMD_GET_PHY_STATE 0x43
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PHY_STATE_IN msgrequest */
#define MC_CMD_GET_PHY_STATE_IN_LEN 0
@@ -3636,6 +3755,8 @@
*/
#define MC_CMD_WOL_FILTER_GET 0x45
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
@@ -3651,6 +3772,8 @@
*/
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
@@ -3692,6 +3815,8 @@
*/
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
@@ -3722,6 +3847,8 @@
*/
#define MC_CMD_TESTASSERT 0x49
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_TESTASSERT_IN msgrequest */
#define MC_CMD_TESTASSERT_IN_LEN 0
@@ -3739,6 +3866,8 @@
*/
#define MC_CMD_WORKAROUND 0x4a
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_WORKAROUND_IN msgrequest */
#define MC_CMD_WORKAROUND_IN_LEN 8
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
@@ -3765,6 +3894,8 @@
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
@@ -3788,6 +3919,8 @@
*/
#define MC_CMD_NVRAM_TEST 0x4c
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
@@ -3849,6 +3982,8 @@
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
@@ -3890,6 +4025,8 @@
*/
#define MC_CMD_NVRAM_PARTITIONS 0x51
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
@@ -3913,6 +4050,8 @@
*/
#define MC_CMD_NVRAM_METADATA 0x52
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_NVRAM_METADATA_IN msgrequest */
#define MC_CMD_NVRAM_METADATA_IN_LEN 4
/* Partition type ID code */
@@ -3958,6 +4097,8 @@
*/
#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
@@ -4087,11 +4228,66 @@
/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 workaround. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 workaround (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug 35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set; must be a VF,
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
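/* Illustrative encoding sketch (not part of the patch; the helper is
 * hypothetical): the FUNCTION dword packs the PF in bits 0..15 and the VF
 * in bits 16..31, so PF 1 / VF 3 encodes as 0x00030001:
 *
 *   static inline u32 link_state_mode_function(u32 pf, u32 vf)
 *   {
 *           return (vf << MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN) | pf;
 *   }
 */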
+
+
+/***********************************/
/* MC_CMD_READ_REGS
* Get a dump of the MCPU registers
*/
#define MC_CMD_READ_REGS 0x50
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_REGS_IN msgrequest */
#define MC_CMD_READ_REGS_IN_LEN 0
@@ -4115,6 +4311,8 @@
*/
#define MC_CMD_INIT_EVQ 0x80
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_EVQ_IN msgrequest */
#define MC_CMD_INIT_EVQ_IN_LENMIN 44
#define MC_CMD_INIT_EVQ_IN_LENMAX 548
@@ -4213,6 +4411,8 @@
*/
#define MC_CMD_INIT_RXQ 0x81
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_RXQ_IN msgrequest */
#define MC_CMD_INIT_RXQ_IN_LENMIN 36
#define MC_CMD_INIT_RXQ_IN_LENMAX 252
@@ -4265,6 +4465,8 @@
*/
#define MC_CMD_INIT_TXQ 0x82
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_INIT_TXQ_IN msgrequest */
#define MC_CMD_INIT_TXQ_IN_LENMIN 36
#define MC_CMD_INIT_TXQ_IN_LENMAX 252
@@ -4322,6 +4524,8 @@
*/
#define MC_CMD_FINI_EVQ 0x83
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_EVQ_IN msgrequest */
#define MC_CMD_FINI_EVQ_IN_LEN 4
/* Instance of EVQ to destroy. Should be the same instance as that previously
@@ -4339,6 +4543,8 @@
*/
#define MC_CMD_FINI_RXQ 0x84
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_RXQ_IN msgrequest */
#define MC_CMD_FINI_RXQ_IN_LEN 4
/* Instance of RXQ to destroy */
@@ -4354,6 +4560,8 @@
*/
#define MC_CMD_FINI_TXQ 0x85
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FINI_TXQ_IN msgrequest */
#define MC_CMD_FINI_TXQ_IN_LEN 4
/* Instance of TXQ to destroy */
@@ -4369,6 +4577,8 @@
*/
#define MC_CMD_DRIVER_EVENT 0x86
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DRIVER_EVENT_IN msgrequest */
#define MC_CMD_DRIVER_EVENT_IN_LEN 12
/* Handle of target EVQ */
@@ -4392,6 +4602,8 @@
*/
#define MC_CMD_PROXY_CMD 0x5b
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PROXY_CMD_IN msgrequest */
#define MC_CMD_PROXY_CMD_IN_LEN 4
/* The handle of the target function. */
@@ -4414,6 +4626,8 @@
*/
#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
/* Owner ID to use */
@@ -4437,6 +4651,8 @@
*/
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
@@ -4463,6 +4679,8 @@
*/
#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
@@ -4477,6 +4695,8 @@
*/
#define MC_CMD_FILTER_OP 0x8a
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FILTER_OP_IN msgrequest */
#define MC_CMD_FILTER_OP_IN_LEN 108
/* identifies the type of operation requested */
@@ -4637,6 +4857,8 @@
*/
#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
/* identifies the type of operation requested */
@@ -4669,6 +4891,8 @@
*/
#define MC_CMD_PARSER_DISP_RW 0xe5
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
/* identifies the target of the operation */
@@ -4719,6 +4943,8 @@
*/
#define MC_CMD_GET_PF_COUNT 0xb6
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PF_COUNT_IN msgrequest */
#define MC_CMD_GET_PF_COUNT_IN_LEN 0
@@ -4750,6 +4976,8 @@
*/
#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
@@ -4765,6 +4993,8 @@
*/
#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
/* Identifies the port assignment for this function. */
@@ -4780,6 +5010,8 @@
*/
#define MC_CMD_ALLOC_VIS 0x8b
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_ALLOC_VIS_IN msgrequest */
#define MC_CMD_ALLOC_VIS_IN_LEN 8
/* The minimum number of VIs that is acceptable */
@@ -4804,6 +5036,8 @@
*/
#define MC_CMD_FREE_VIS 0x8c
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_FREE_VIS_IN msgrequest */
#define MC_CMD_FREE_VIS_IN_LEN 0
@@ -4817,6 +5051,8 @@
*/
#define MC_CMD_GET_SRIOV_CFG 0xba
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
@@ -4841,6 +5077,8 @@
*/
#define MC_CMD_SET_SRIOV_CFG 0xbb
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
/* Number of VFs currently enabled. */
@@ -4870,6 +5108,8 @@
*/
#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
@@ -4889,6 +5129,8 @@
*/
#define MC_CMD_DUMP_VI_STATE 0x8e
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
/* The VI number to query. */
@@ -4998,6 +5240,8 @@
*/
#define MC_CMD_ALLOC_PIOBUF 0x8f
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
@@ -5013,6 +5257,8 @@
*/
#define MC_CMD_FREE_PIOBUF 0x90
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_FREE_PIOBUF_IN msgrequest */
#define MC_CMD_FREE_PIOBUF_IN_LEN 4
/* Handle for allocated push I/O buffer. */
@@ -5028,6 +5274,8 @@
*/
#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
/* VI number to get information for. */
@@ -5062,6 +5310,8 @@
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
/* VI number to set information for. */
@@ -5096,6 +5346,8 @@
*/
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
@@ -5157,6 +5409,8 @@
*/
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
@@ -5203,6 +5457,8 @@
*/
#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
* are subtle, and so downloads must proceed in a number of phases.
*
@@ -5318,6 +5574,7 @@
*/
#define MC_CMD_GET_CAPABILITIES 0xbe
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
@@ -5343,6 +5600,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
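
The new EVB bit lands in the FLAGS1 word of the GET_CAPABILITIES response, which the driver caches as datapath_caps (see the nic.h kernel-doc later in this diff). A minimal sketch of testing it, assuming a helper of this shape:

    /* Sketch: does the datapath firmware support EVB? */
    static bool efx_ef10_has_evb_cap(const struct efx_ef10_nic_data *nic_data)
    {
        return nic_data->datapath_caps &
               (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN);
    }
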
@@ -5433,6 +5692,8 @@
*/
#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
@@ -5448,6 +5709,8 @@
*/
#define MC_CMD_TCM_BUCKET_FREE 0xb3
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
/* the bucket id */
@@ -5463,6 +5726,8 @@
*/
#define MC_CMD_TCM_BUCKET_INIT 0xb4
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
/* the bucket id */
@@ -5480,6 +5745,8 @@
*/
#define MC_CMD_TCM_TXQ_INIT 0xb5
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
/* the txq id */
@@ -5511,6 +5778,8 @@
*/
#define MC_CMD_LINK_PIOBUF 0x92
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_LINK_PIOBUF_IN msgrequest */
#define MC_CMD_LINK_PIOBUF_IN_LEN 8
/* Handle for allocated push I/O buffer. */
@@ -5528,6 +5797,8 @@
*/
#define MC_CMD_UNLINK_PIOBUF 0x93
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
/* Function Local Instance (VI) number. */
@@ -5543,6 +5814,8 @@
*/
#define MC_CMD_VSWITCH_ALLOC 0x94
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
/* The port to connect to the v-switch's upstream port. */
@@ -5572,6 +5845,8 @@
*/
#define MC_CMD_VSWITCH_FREE 0x95
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VSWITCH_FREE_IN msgrequest */
#define MC_CMD_VSWITCH_FREE_IN_LEN 4
/* The port to which the v-switch is connected. */
@@ -5587,6 +5862,8 @@
*/
#define MC_CMD_VPORT_ALLOC 0x96
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_ALLOC_IN msgrequest */
#define MC_CMD_VPORT_ALLOC_IN_LEN 20
/* The port to which the v-switch is connected. */
@@ -5636,6 +5913,8 @@
*/
#define MC_CMD_VPORT_FREE 0x97
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_FREE_IN msgrequest */
#define MC_CMD_VPORT_FREE_IN_LEN 4
/* The handle of the v-port */
@@ -5651,8 +5930,10 @@
*/
#define MC_CMD_VADAPTOR_ALLOC 0x98
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
-#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
/* The port to connect to the v-adaptor's port. */
#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
/* Flags controlling v-adaptor creation */
@@ -5661,6 +5942,19 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+/* The number of VLAN tags to transparently insert/remove. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
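
The widened VADAPTOR_ALLOC request packs up to two 16-bit VLAN tags into the dword at offset 20 and carries an explicit MAC address, with MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC selecting derivation from the upstream port. A hedged sketch of building such a request with the MCDI helpers seen elsewhere in this diff (port_id and vlan_tag are assumed inputs):

    MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

    MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
    MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS, 1);
    /* VLAN_TAG_0 occupies bits 15:0 of the VLAN_TAGS dword */
    MCDI_POPULATE_DWORD_1(inbuf, VADAPTOR_ALLOC_IN_VLAN_TAGS,
                          VADAPTOR_ALLOC_IN_VLAN_TAG_0, vlan_tag);
    rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
                      NULL, 0, NULL);
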
@@ -5672,6 +5966,8 @@
*/
#define MC_CMD_VADAPTOR_FREE 0x99
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
/* The port to which the v-adaptor is connected. */
@@ -5682,11 +5978,53 @@
/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The new MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
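
A minimal sketch of issuing the new VADAPTOR_SET_MAC command; the function name is illustrative, but the buffer handling follows the MCDI pattern visible elsewhere in this diff:

    static int efx_vadaptor_set_mac_sketch(struct efx_nic *efx,
                                           unsigned int port_id,
                                           const u8 *mac)
    {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);

        MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
                       port_id);
        ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
                        mac);
        /* MC_CMD_VADAPTOR_SET_MAC_OUT_LEN is 0: no response payload */
        return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
                            sizeof(inbuf), NULL, 0, NULL);
    }
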
+
+/***********************************/
/* MC_CMD_EVB_PORT_ASSIGN
* assign a port to a PCI function.
*/
#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
/* The port to assign. */
@@ -5708,6 +6046,8 @@
*/
#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
@@ -5736,6 +6076,8 @@
*/
#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
/* The handle of the owning upstream port */
@@ -5753,6 +6095,8 @@
*/
#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
/* The handle of the Onload stack */
@@ -5768,6 +6112,8 @@
*/
#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
/* The handle of the owning upstream port */
@@ -5800,6 +6146,8 @@
*/
#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
/* The handle of the RSS context */
@@ -5815,6 +6163,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
/* The handle of the RSS context */
@@ -5833,6 +6183,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
/* The handle of the RSS context */
@@ -5851,6 +6203,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
/* The handle of the RSS context */
@@ -5869,6 +6223,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
/* The handle of the RSS context */
@@ -5887,6 +6243,8 @@
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
@@ -5912,6 +6270,8 @@
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
/* The handle of the RSS context */
@@ -5937,6 +6297,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
/* The handle of the owning upstream port */
@@ -5959,6 +6321,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
/* The handle of the .1p mapping */
@@ -5974,6 +6338,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
/* The handle of the .1p mapping */
@@ -5994,6 +6360,8 @@
*/
#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
/* The handle of the .1p mapping */
@@ -6014,6 +6382,8 @@
*/
#define MC_CMD_GET_VECTOR_CFG 0xbf
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
@@ -6033,6 +6403,8 @@
*/
#define MC_CMD_SET_VECTOR_CFG 0xc0
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
@@ -6423,6 +6795,8 @@
*/
#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
@@ -6441,6 +6815,8 @@
*/
#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
@@ -6459,6 +6835,8 @@
*/
#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
/* The handle of the v-port */
@@ -6486,6 +6864,8 @@
*/
#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
/* Index of the first buffer table entry. */
@@ -6510,6 +6890,8 @@
*/
#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
@@ -6526,6 +6908,8 @@
*/
#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
@@ -6890,6 +7274,8 @@
*/
#define MC_CMD_GET_CLOCK 0xac
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_CLOCK_IN msgrequest */
#define MC_CMD_GET_CLOCK_IN_LEN 0
@@ -6907,6 +7293,8 @@
*/
#define MC_CMD_SET_CLOCK 0xad
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_CLOCK_IN msgrequest */
#define MC_CMD_SET_CLOCK_IN_LEN 12
/* Requested system frequency in MHz; 0 leaves unchanged. */
@@ -6932,6 +7320,8 @@
*/
#define MC_CMD_DPCPU_RPC 0xae
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
@@ -7016,6 +7406,8 @@
*/
#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
/* Interrupt level relative to base for function. */
@@ -7031,6 +7423,8 @@
*/
#define MC_CMD_CAP_BLK_READ 0xe7
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_CAP_BLK_READ_IN msgrequest */
#define MC_CMD_CAP_BLK_READ_IN_LEN 12
#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
@@ -7055,6 +7449,8 @@
*/
#define MC_CMD_DUMP_DO 0xe8
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_DO_IN msgrequest */
#define MC_CMD_DUMP_DO_IN_LEN 52
#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
@@ -7108,6 +7504,8 @@
*/
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
@@ -7151,6 +7549,8 @@
*/
#define MC_CMD_SET_PSU 0xea
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PSU_IN msgrequest */
#define MC_CMD_SET_PSU_IN_LEN 12
#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
@@ -7171,6 +7571,8 @@
*/
#define MC_CMD_GET_FUNCTION_INFO 0xec
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
@@ -7188,6 +7590,8 @@
*/
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -7203,6 +7607,8 @@
*/
#define MC_CMD_UART_SEND_DATA 0xee
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
@@ -7231,6 +7637,8 @@
*/
#define MC_CMD_UART_RECV_DATA 0xef
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
/* CRC32 over OFFSET, LENGTH, RESERVED */
@@ -7266,6 +7674,8 @@
*/
#define MC_CMD_READ_FUSES 0xf0
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_READ_FUSES_IN msgrequest */
#define MC_CMD_READ_FUSES_IN_LEN 8
/* Offset in OTP to read */
@@ -7292,6 +7702,8 @@
*/
#define MC_CMD_KR_TUNE 0xf1
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
#define MC_CMD_KR_TUNE_IN_LENMAX 252
@@ -7550,6 +7962,8 @@
*/
#define MC_CMD_PCIE_TUNE 0xf2
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_PCIE_TUNE_IN msgrequest */
#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
@@ -7711,6 +8125,8 @@
*/
#define MC_CMD_LICENSING 0xf3
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LICENSING_IN msgrequest */
#define MC_CMD_LICENSING_IN_LEN 4
/* identifies the type of operation requested */
@@ -7756,6 +8172,8 @@
*/
#define MC_CMD_MC2MC_PROXY 0xf4
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_MC2MC_PROXY_IN msgrequest */
#define MC_CMD_MC2MC_PROXY_IN_LEN 0
@@ -7771,6 +8189,8 @@
*/
#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
/* application ID to query (LICENSED_APP_ID_xxx) */
@@ -7792,6 +8212,8 @@
*/
#define MC_CMD_LICENSED_APP_OP 0xf6
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
@@ -7847,6 +8269,8 @@
*/
#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
@@ -7881,6 +8305,8 @@
*/
#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index fb19b70eac01..7f295c4d7b80 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -865,6 +865,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+ /* This has no effect on EF10 */
ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
efx->net_dev->dev_addr);
@@ -923,6 +924,7 @@ enum efx_stats_action {
static int efx_mcdi_mac_stats(struct efx_nic *efx,
enum efx_stats_action action, int clear)
{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -944,9 +946,14 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ /* Expect ENOENT if DMA queues have not been set up */
+ if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
+ efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
+ NULL, 0, rc);
return rc;
}
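
The conversion to efx_mcdi_rpc_quiet() above is a pattern repeated through this series: suppress the automatic MCDI error log and let the caller report only errors it does not expect, here -ENOENT before any DMA queues exist. Condensed to a sketch, where error_is_expected() stands in for a caller-specific test such as the -ENOENT check above:

    rc = efx_mcdi_rpc_quiet(efx, cmd, inbuf, inlen, NULL, 0, NULL);
    if (rc && !error_is_expected(rc))   /* e.g. rc == -ENOENT */
        efx_mcdi_display_error(efx, cmd, inlen, NULL, 0, rc);
    return rc;
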
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 325dd94bca46..d72f522bf9c3 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -25,6 +25,7 @@
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
@@ -793,7 +794,6 @@ union efx_multicast_hash {
efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
-struct efx_vf;
struct vfdi_status;
/**
@@ -897,7 +897,8 @@ struct vfdi_status;
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
- * @filter_lock: Filter table lock
+ * @filter_sem: Filter table rw_semaphore, for freeing the table
+ * @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
@@ -909,7 +910,6 @@ struct vfdi_status;
* completed (either success or failure). Not used when MCDI is used to
* flush receive queues.
* @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
- * @vf: Array of &struct efx_vf objects.
* @vf_count: Number of VFs intended to be enabled.
* @vf_init_count: Number of VFs that have been fully initialised.
* @vi_scale: log2 number of vnics per VF.
@@ -1040,6 +1040,7 @@ struct efx_nic {
void *loopback_selftest;
+ struct rw_semaphore filter_sem;
spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
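
The kernel-doc above splits the filter locking in two: filter_sem guards the lifetime of filter_state (write-locked when the table is replaced or freed), while filter_lock continues to cover individual entry updates. A minimal sketch of the intended discipline, assuming call sites of this shape:

    /* Reader: pin the table, then take the spinlock for entries */
    down_read(&efx->filter_sem);
    spin_lock_bh(&efx->filter_lock);
    /* ... look up or modify entries in efx->filter_state ... */
    spin_unlock_bh(&efx->filter_lock);
    up_read(&efx->filter_sem);

    /* Writer: exclusive access to swap or free the table itself */
    down_write(&efx->filter_sem);
    /* ... replace or free efx->filter_state ... */
    up_write(&efx->filter_sem);
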
@@ -1053,7 +1054,6 @@ struct efx_nic {
wait_queue_head_t flush_wq;
#ifdef CONFIG_SFC_SRIOV
- struct efx_vf *vf;
unsigned vf_count;
unsigned vf_init_count;
unsigned vi_scale;
@@ -1092,6 +1092,7 @@ struct efx_mtd_partition {
/**
* struct efx_nic_type - Efx device type definition
+ * @mem_bar: Get the memory BAR
* @mem_map_size: Get memory BAR mapped size
* @probe: Probe the controller
* @remove: Free resources allocated by probe()
@@ -1204,6 +1205,7 @@ struct efx_mtd_partition {
* @ptp_set_ts_config: Set hardware timestamp configuration. The flags
* and tx_type will already have been validated but this operation
* must validate and update rx_filter.
+ * @set_mac_address: Set the MAC address of the device
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1226,6 +1228,8 @@ struct efx_mtd_partition {
* @hwtstamp_filters: Mask of hardware timestamp filter types supported
*/
struct efx_nic_type {
+ bool is_vf;
+ unsigned int mem_bar;
unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
@@ -1277,7 +1281,8 @@ struct efx_nic_type {
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
- void (*rx_push_rss_config)(struct efx_nic *efx);
+ int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1330,11 +1335,28 @@ struct efx_nic_type {
int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
int (*ptp_set_ts_config)(struct efx_nic *efx,
struct hwtstamp_config *init);
+ int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
int (*sriov_init)(struct efx_nic *efx);
void (*sriov_fini)(struct efx_nic *efx);
- void (*sriov_mac_address_changed)(struct efx_nic *efx);
bool (*sriov_wanted)(struct efx_nic *efx);
void (*sriov_reset)(struct efx_nic *efx);
+ void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
+ int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
+ int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
+ u8 qos);
+ int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
+ bool spoofchk);
+ int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
+ struct ifla_vf_info *ivi);
+ int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
+ int link_state);
+ int (*sriov_get_phys_port_id)(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
+ int (*vswitching_probe)(struct efx_nic *efx);
+ int (*vswitching_restore)(struct efx_nic *efx);
+ void (*vswitching_remove)(struct efx_nic *efx);
+ int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
+ int (*set_mac_address)(struct efx_nic *efx);
int revision;
unsigned int txd_ptr_tbl_base;
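
The per-type SR-IOV hooks added above let generic code dispatch VF configuration without knowing the hardware generation; Siena wires them up in siena.c later in this diff. An illustrative wrapper (the name is hypothetical; the real wrappers live outside this excerpt):

    static int efx_sriov_set_vf_mac_sketch(struct net_device *net_dev,
                                           int vf_i, u8 *mac)
    {
        struct efx_nic *efx = netdev_priv(net_dev);

        if (!efx->type->sriov_set_vf_mac)
            return -EOPNOTSUPP;
        return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
    }
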
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 93d10cbbd1cf..31ff9084d9a4 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -381,6 +381,7 @@ enum {
* @efx: Pointer back to main interface structure
* @wol_filter_id: Wake-on-LAN packet filter id
* @stats: Hardware statistics
+ * @vf: Array of &struct siena_vf objects
* @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
* @vfdi_status: Common VFDI status page to be dmad to VF address space.
* @local_addr_list: List of local addresses. Protected by %local_lock.
@@ -394,6 +395,7 @@ struct siena_nic_data {
int wol_filter_id;
u64 stats[SIENA_STAT_COUNT];
#ifdef CONFIG_SFC_SRIOV
+ struct siena_vf *vf;
struct efx_channel *vfdi_channel;
unsigned vf_buftbl_base;
struct efx_buffer vfdi_status;
@@ -405,59 +407,77 @@ struct siena_nic_data {
};
enum {
- EF10_STAT_tx_bytes = GENERIC_STAT_COUNT,
- EF10_STAT_tx_packets,
- EF10_STAT_tx_pause,
- EF10_STAT_tx_control,
- EF10_STAT_tx_unicast,
- EF10_STAT_tx_multicast,
- EF10_STAT_tx_broadcast,
- EF10_STAT_tx_lt64,
- EF10_STAT_tx_64,
- EF10_STAT_tx_65_to_127,
- EF10_STAT_tx_128_to_255,
- EF10_STAT_tx_256_to_511,
- EF10_STAT_tx_512_to_1023,
- EF10_STAT_tx_1024_to_15xx,
- EF10_STAT_tx_15xx_to_jumbo,
- EF10_STAT_rx_bytes,
- EF10_STAT_rx_bytes_minus_good_bytes,
- EF10_STAT_rx_good_bytes,
- EF10_STAT_rx_bad_bytes,
- EF10_STAT_rx_packets,
- EF10_STAT_rx_good,
- EF10_STAT_rx_bad,
- EF10_STAT_rx_pause,
- EF10_STAT_rx_control,
+ EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
+ EF10_STAT_port_tx_packets,
+ EF10_STAT_port_tx_pause,
+ EF10_STAT_port_tx_control,
+ EF10_STAT_port_tx_unicast,
+ EF10_STAT_port_tx_multicast,
+ EF10_STAT_port_tx_broadcast,
+ EF10_STAT_port_tx_lt64,
+ EF10_STAT_port_tx_64,
+ EF10_STAT_port_tx_65_to_127,
+ EF10_STAT_port_tx_128_to_255,
+ EF10_STAT_port_tx_256_to_511,
+ EF10_STAT_port_tx_512_to_1023,
+ EF10_STAT_port_tx_1024_to_15xx,
+ EF10_STAT_port_tx_15xx_to_jumbo,
+ EF10_STAT_port_rx_bytes,
+ EF10_STAT_port_rx_bytes_minus_good_bytes,
+ EF10_STAT_port_rx_good_bytes,
+ EF10_STAT_port_rx_bad_bytes,
+ EF10_STAT_port_rx_packets,
+ EF10_STAT_port_rx_good,
+ EF10_STAT_port_rx_bad,
+ EF10_STAT_port_rx_pause,
+ EF10_STAT_port_rx_control,
+ EF10_STAT_port_rx_unicast,
+ EF10_STAT_port_rx_multicast,
+ EF10_STAT_port_rx_broadcast,
+ EF10_STAT_port_rx_lt64,
+ EF10_STAT_port_rx_64,
+ EF10_STAT_port_rx_65_to_127,
+ EF10_STAT_port_rx_128_to_255,
+ EF10_STAT_port_rx_256_to_511,
+ EF10_STAT_port_rx_512_to_1023,
+ EF10_STAT_port_rx_1024_to_15xx,
+ EF10_STAT_port_rx_15xx_to_jumbo,
+ EF10_STAT_port_rx_gtjumbo,
+ EF10_STAT_port_rx_bad_gtjumbo,
+ EF10_STAT_port_rx_overflow,
+ EF10_STAT_port_rx_align_error,
+ EF10_STAT_port_rx_length_error,
+ EF10_STAT_port_rx_nodesc_drops,
+ EF10_STAT_port_rx_pm_trunc_bb_overflow,
+ EF10_STAT_port_rx_pm_discard_bb_overflow,
+ EF10_STAT_port_rx_pm_trunc_vfifo_full,
+ EF10_STAT_port_rx_pm_discard_vfifo_full,
+ EF10_STAT_port_rx_pm_trunc_qbb,
+ EF10_STAT_port_rx_pm_discard_qbb,
+ EF10_STAT_port_rx_pm_discard_mapping,
+ EF10_STAT_port_rx_dp_q_disabled_packets,
+ EF10_STAT_port_rx_dp_di_dropped_packets,
+ EF10_STAT_port_rx_dp_streaming_packets,
+ EF10_STAT_port_rx_dp_hlb_fetch,
+ EF10_STAT_port_rx_dp_hlb_wait,
EF10_STAT_rx_unicast,
+ EF10_STAT_rx_unicast_bytes,
EF10_STAT_rx_multicast,
+ EF10_STAT_rx_multicast_bytes,
EF10_STAT_rx_broadcast,
- EF10_STAT_rx_lt64,
- EF10_STAT_rx_64,
- EF10_STAT_rx_65_to_127,
- EF10_STAT_rx_128_to_255,
- EF10_STAT_rx_256_to_511,
- EF10_STAT_rx_512_to_1023,
- EF10_STAT_rx_1024_to_15xx,
- EF10_STAT_rx_15xx_to_jumbo,
- EF10_STAT_rx_gtjumbo,
- EF10_STAT_rx_bad_gtjumbo,
+ EF10_STAT_rx_broadcast_bytes,
+ EF10_STAT_rx_bad,
+ EF10_STAT_rx_bad_bytes,
EF10_STAT_rx_overflow,
- EF10_STAT_rx_align_error,
- EF10_STAT_rx_length_error,
- EF10_STAT_rx_nodesc_drops,
- EF10_STAT_rx_pm_trunc_bb_overflow,
- EF10_STAT_rx_pm_discard_bb_overflow,
- EF10_STAT_rx_pm_trunc_vfifo_full,
- EF10_STAT_rx_pm_discard_vfifo_full,
- EF10_STAT_rx_pm_trunc_qbb,
- EF10_STAT_rx_pm_discard_qbb,
- EF10_STAT_rx_pm_discard_mapping,
- EF10_STAT_rx_dp_q_disabled_packets,
- EF10_STAT_rx_dp_di_dropped_packets,
- EF10_STAT_rx_dp_streaming_packets,
- EF10_STAT_rx_dp_hlb_fetch,
- EF10_STAT_rx_dp_hlb_wait,
+ EF10_STAT_tx_unicast,
+ EF10_STAT_tx_unicast_bytes,
+ EF10_STAT_tx_multicast,
+ EF10_STAT_tx_multicast_bytes,
+ EF10_STAT_tx_broadcast,
+ EF10_STAT_tx_broadcast_bytes,
+ EF10_STAT_tx_bad,
+ EF10_STAT_tx_bad_bytes,
+ EF10_STAT_tx_overflow,
EF10_STAT_COUNT
};
@@ -483,12 +503,21 @@ enum {
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
* @rx_rss_context: Firmware handle for our RSS context
+ * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
* after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
* %MC_CMD_GET_CAPABILITIES response)
+ * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
+ * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
+ * @vport_id: The function's vport ID, only relevant for PFs
+ * @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
+ * @pf_index: The number for this PF, or the parent PF if this is a VF
+#ifdef CONFIG_SFC_SRIOV
+ * @vf: Pointer to VF data structure
+#endif
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
@@ -503,126 +532,27 @@ struct efx_ef10_nic_data {
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
bool must_restore_piobufs;
u32 rx_rss_context;
+ bool rx_rss_context_exclusive;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool must_check_datapath_caps;
u32 datapath_caps;
-};
-
-/*
- * On the SFC9000 family each port is associated with 1 PCI physical
- * function (PF) handled by sfc and a configurable number of virtual
- * functions (VFs) that may be handled by some other driver, often in
- * a VM guest. The queue pointer registers are mapped in both PF and
- * VF BARs such that an 8K region provides access to a single RX, TX
- * and event queue (collectively a Virtual Interface, VI or VNIC).
- *
- * The PF has access to all 1024 VIs while VFs are mapped to VIs
- * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
- * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE).
- * The number of VIs and the VI_SCALE value are configurable but must
- * be established at boot time by firmware.
- */
-
-/* Maximum VI_SCALE parameter supported by Siena */
-#define EFX_VI_SCALE_MAX 6
-/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
- * so this is the smallest allowed value. */
-#define EFX_VI_BASE 128U
-/* Maximum number of VFs allowed */
-#define EFX_VF_COUNT_MAX 127
-/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
-#define EFX_MAX_VF_EVQ_SIZE 8192UL
-/* The number of buffer table entries reserved for each VI on a VF */
-#define EFX_VF_BUFTBL_PER_VI \
- ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
- sizeof(efx_qword_t) / EFX_BUF_SIZE)
-
+ unsigned int rx_dpcpu_fw_id;
+ unsigned int tx_dpcpu_fw_id;
+ unsigned int vport_id;
+ bool must_probe_vswitching;
+ unsigned int pf_index;
+ u8 port_id[ETH_ALEN];
#ifdef CONFIG_SFC_SRIOV
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx)
-{
- return efx->vf_count != 0;
-}
-
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
-{
- return efx->vf_init_count != 0;
-}
-
-static inline unsigned int efx_vf_size(struct efx_nic *efx)
-{
- return 1 << efx->vi_scale;
-}
+ unsigned int vf_index;
+ struct ef10_vf *vf;
+#endif
+ u8 vport_mac[ETH_ALEN];
+};
int efx_init_sriov(void);
-void efx_siena_sriov_probe(struct efx_nic *efx);
-int efx_siena_sriov_init(struct efx_nic *efx);
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
-void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
-void efx_siena_sriov_reset(struct efx_nic *efx);
-void efx_siena_sriov_fini(struct efx_nic *efx);
void efx_fini_sriov(void);
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#else
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; }
-static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
-static inline int efx_init_sriov(void) { return 0; }
-static inline void efx_siena_sriov_probe(struct efx_nic *efx) {}
-static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_event(struct efx_channel *channel,
- efx_qword_t *event) {}
-static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx,
- unsigned dmaq) {}
-static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {}
-static inline void efx_siena_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_fini(struct efx_nic *efx) {}
-static inline void efx_fini_sriov(void) {}
-
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#endif
-
-/* FALCON */
-static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {}
-
-int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf,
- u16 vlan, u8 qos);
-int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf,
- struct ifla_vf_info *ivf);
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
- bool spoofchk);
-
struct ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
@@ -654,6 +584,7 @@ extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
/**************************************************************************
*
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index a2e9aee05cdd..ad62615a93dc 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -306,7 +306,7 @@ struct efx_ptp_data {
struct work_struct pps_work;
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
- MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+ _MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
unsigned int good_syncs;
unsigned int fast_syncs;
@@ -389,11 +389,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+ if (rc)
memset(outbuf, 0, sizeof(outbuf));
- }
efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
efx_ptp_stat_mask,
stats, _MCDI_PTR(outbuf, 0), false);
@@ -490,14 +487,20 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
*/
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &out_len);
- if (rc == 0)
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &out_len);
+ if (rc == 0) {
fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
- else if (rc == -EINVAL)
+ } else if (rc == -EINVAL) {
fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
- else
+ } else if (rc == -EPERM) {
+ netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ return rc;
+ } else {
+ efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
+ outbuf, sizeof(outbuf), rc);
return rc;
+ }
if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
@@ -541,8 +544,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), NULL);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
if (rc == 0) {
efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
@@ -558,6 +561,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
efx->ptp_data->ts_corrections.pps_out = 0;
efx->ptp_data->ts_corrections.pps_in = 0;
} else {
+ efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf,
+ sizeof(outbuf), rc);
return rc;
}
@@ -568,7 +573,7 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
static int efx_ptp_enable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
@@ -596,7 +601,7 @@ static int efx_ptp_enable(struct efx_nic *efx)
static int efx_ptp_disable(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
- MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+ MCDI_DECLARE_BUF_ERR(outbuf);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
@@ -604,7 +609,12 @@ static int efx_ptp_disable(struct efx_nic *efx)
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
rc = (rc == -EALREADY) ? 0 : rc;
- if (rc)
+ /* If we get ENOSYS, the NIC doesn't support PTP, and thus this function
+ * should only have been called during probe.
+ */
+ if (rc == -ENOSYS || rc == -EPERM)
+ netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ else if (rc)
efx_mcdi_display_error(efx, MC_CMD_PTP,
MC_CMD_PTP_IN_DISABLE_LEN,
outbuf, sizeof(outbuf), rc);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index f12c811938d2..b323b9167526 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -25,6 +25,7 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "selftest.h"
+#include "siena_sriov.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
@@ -306,7 +307,9 @@ static int siena_probe_nic(struct efx_nic *efx)
if (rc)
goto fail5;
+#ifdef CONFIG_SFC_SRIOV
efx_siena_sriov_probe(efx);
+#endif
efx_ptp_defer_probe_with_channel(efx);
return 0;
@@ -321,7 +324,8 @@ fail1:
return rc;
}
-static void siena_rx_push_rss_config(struct efx_nic *efx)
+static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
+ const u32 *rx_indir_table)
{
efx_oword_t temp;
@@ -343,7 +347,11 @@ static void siena_rx_push_rss_config(struct efx_nic *efx)
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+ memcpy(efx->rx_indir_table, rx_indir_table,
+ sizeof(efx->rx_indir_table));
efx_farch_rx_push_indir_table(efx);
+
+ return 0;
}
/* This call performs hardware-specific global initialisation, such as
@@ -386,7 +394,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
- siena_rx_push_rss_config(efx);
+ siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -909,6 +917,8 @@ fail:
*/
const struct efx_nic_type siena_a0_nic_type = {
+ .is_vf = false,
+ .mem_bar = EFX_MEM_BAR,
.mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
@@ -996,11 +1006,22 @@ const struct efx_nic_type siena_a0_nic_type = {
#endif
.ptp_write_host_time = siena_ptp_write_host_time,
.ptp_set_ts_config = siena_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+ .sriov_configure = efx_siena_sriov_configure,
.sriov_init = efx_siena_sriov_init,
.sriov_fini = efx_siena_sriov_fini,
- .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed,
.sriov_wanted = efx_siena_sriov_wanted,
.sriov_reset = efx_siena_sriov_reset,
+ .sriov_flr = efx_siena_sriov_flr,
+ .sriov_set_vf_mac = efx_siena_sriov_set_vf_mac,
+ .sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
+ .sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
+ .sriov_get_vf_config = efx_siena_sriov_get_vf_config,
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+ .set_mac_address = efx_siena_sriov_mac_address_changed,
+#endif
.revision = EFX_REV_SIENA_A0,
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index fe83430796fd..da7b94f34604 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -16,6 +16,7 @@
#include "filter.h"
#include "mcdi_pcol.h"
#include "farch_regs.h"
+#include "siena_sriov.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
@@ -38,7 +39,7 @@ enum efx_vf_tx_filter_mode {
};
/**
- * struct efx_vf - Back-end resource and protocol state for a PCI VF
+ * struct siena_vf - Back-end resource and protocol state for a PCI VF
* @efx: The Efx NIC owning this VF
* @pci_rid: The PCI requester ID for this VF
* @pci_name: The PCI name (formatted address) of this VF
@@ -83,7 +84,7 @@ enum efx_vf_tx_filter_mode {
* @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
* @reset_work: Work item to schedule a VF reset.
*/
-struct efx_vf {
+struct siena_vf {
struct efx_nic *efx;
unsigned int pci_rid;
char pci_name[13]; /* dddd:bb:dd.f */
@@ -189,7 +190,7 @@ MODULE_PARM_DESC(max_vfs,
*/
static struct workqueue_struct *vfdi_workqueue;
-static unsigned abs_index(struct efx_vf *vf, unsigned index)
+static unsigned abs_index(struct siena_vf *vf, unsigned index)
{
return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}
@@ -207,8 +208,8 @@ static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
- rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
- outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
+ outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_SRIOV_OUT_LEN)
@@ -299,7 +300,7 @@ out:
/* The TX filter is entirely controlled by this driver, and is modified
* underneath the feet of the VF
*/
-static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -343,7 +344,7 @@ static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
}
/* The RX filter is managed here on behalf of the VF driver */
-static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
@@ -382,7 +383,7 @@ static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
}
}
-static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
+static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -397,7 +398,7 @@ static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
* local_page_list, either by acquiring local_lock or by running from
* efx_siena_sriov_peer_work()
*/
-static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
+static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -509,8 +510,9 @@ static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
* Optionally set VF index and VI index within the VF.
*/
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
- struct efx_vf **vf_out, unsigned *rel_index_out)
+ struct siena_vf **vf_out, unsigned *rel_index_out)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned vf_i;
if (abs_index < EFX_VI_BASE)
@@ -520,13 +522,13 @@ static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
return true;
if (vf_out)
- *vf_out = efx->vf + vf_i;
+ *vf_out = nic_data->vf + vf_i;
if (rel_index_out)
*rel_index_out = abs_index % efx_vf_size(efx);
return false;
}
-static int efx_vfdi_init_evq(struct efx_vf *vf)
+static int efx_vfdi_init_evq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -567,7 +569,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_init_rxq(struct efx_vf *vf)
+static int efx_vfdi_init_rxq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -608,7 +610,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_init_txq(struct efx_vf *vf)
+static int efx_vfdi_init_txq(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
@@ -655,7 +657,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf)
}
/* Returns true when efx_vfdi_fini_all_queues should wake */
-static bool efx_vfdi_flush_wake(struct efx_vf *vf)
+static bool efx_vfdi_flush_wake(struct siena_vf *vf)
{
/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
smp_mb();
@@ -664,7 +666,7 @@ static bool efx_vfdi_flush_wake(struct efx_vf *vf)
atomic_read(&vf->rxq_retry_count);
}
-static void efx_vfdi_flush_clear(struct efx_vf *vf)
+static void efx_vfdi_flush_clear(struct siena_vf *vf)
{
memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
vf->txq_count = 0;
@@ -674,7 +676,7 @@ static void efx_vfdi_flush_clear(struct efx_vf *vf)
atomic_set(&vf->rxq_retry_count, 0);
}
-static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
+static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
efx_oword_t reg;
@@ -757,7 +759,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}
-static int efx_vfdi_insert_filter(struct efx_vf *vf)
+static int efx_vfdi_insert_filter(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -789,7 +791,7 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
+static int efx_vfdi_remove_all_filters(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -801,7 +803,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_set_status_page(struct efx_vf *vf)
+static int efx_vfdi_set_status_page(struct siena_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -846,7 +848,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-static int efx_vfdi_clear_status_page(struct efx_vf *vf)
+static int efx_vfdi_clear_status_page(struct siena_vf *vf)
{
mutex_lock(&vf->status_lock);
vf->status_addr = 0;
@@ -855,7 +857,7 @@ static int efx_vfdi_clear_status_page(struct efx_vf *vf)
return VFDI_RC_SUCCESS;
}
-typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
+typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);
static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
@@ -870,7 +872,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
static void efx_siena_sriov_vfdi(struct work_struct *work)
{
- struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct siena_vf *vf = container_of(work, struct siena_vf, req);
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
struct efx_memcpy_req copy[2];
@@ -936,7 +938,8 @@ static void efx_siena_sriov_vfdi(struct work_struct *work)
* event ring in guest memory with VFDI reset events, then (re-initialise) the
* event queue to raise an interrupt. The guest driver will then recover.
*/
-static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
+
+static void efx_siena_sriov_reset_vf(struct siena_vf *vf,
struct efx_buffer *buffer)
{
struct efx_nic *efx = vf->efx;
@@ -1006,7 +1009,7 @@ static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
{
- struct efx_vf *vf = container_of(work, struct efx_vf, req);
+ struct siena_vf *vf = container_of(work, struct siena_vf, req);
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
@@ -1055,8 +1058,10 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
if (!max_vfs)
return;
- if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count))
+ if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
+ netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
return;
+ }
if (count > 0 && count > max_vfs)
count = max_vfs;
@@ -1077,7 +1082,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
peer_work);
struct efx_nic *efx = nic_data->efx;
struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
- struct efx_vf *vf;
+ struct siena_vf *vf;
struct efx_local_addr *local_addr;
struct vfdi_endpoint *peer;
struct efx_endpoint_page *epp;
@@ -1099,7 +1104,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
peer_count = 1;
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
@@ -1155,7 +1160,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
/* Finally, push the pages */
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->status_addr)
@@ -1190,14 +1195,16 @@ static void efx_siena_sriov_free_local(struct efx_nic *efx)
static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
{
unsigned index;
- struct efx_vf *vf;
+ struct siena_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
- efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
- if (!efx->vf)
+ nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf),
+ GFP_KERNEL);
+ if (!nic_data->vf)
return -ENOMEM;
for (index = 0; index < efx->vf_count; ++index) {
- vf = efx->vf + index;
+ vf = nic_data->vf + index;
vf->efx = efx;
vf->index = index;
@@ -1216,11 +1223,12 @@ static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
unsigned int pos;
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
efx_nic_free_buffer(efx, &vf->buf);
kfree(vf->peer_page_addrs);
@@ -1237,7 +1245,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
struct siena_nic_data *nic_data = efx->nic_data;
unsigned index, devfn, sriov, buftbl_base;
u16 offset, stride;
- struct efx_vf *vf;
+ struct siena_vf *vf;
int rc;
sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
@@ -1250,7 +1258,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
buftbl_base = nic_data->vf_buftbl_base;
devfn = pci_dev->devfn + offset;
for (index = 0; index < efx->vf_count; ++index) {
- vf = efx->vf + index;
+ vf = nic_data->vf + index;
/* Reserve buffer entries */
vf->buftbl_base = buftbl_base;
@@ -1350,7 +1358,7 @@ fail_pci:
fail_vfs:
cancel_work_sync(&nic_data->peer_work);
efx_siena_sriov_free_local(efx);
- kfree(efx->vf);
+ kfree(nic_data->vf);
fail_alloc:
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
fail_status:
@@ -1361,7 +1369,7 @@ fail_cmd:
void efx_siena_sriov_fini(struct efx_nic *efx)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned int pos;
struct siena_nic_data *nic_data = efx->nic_data;
@@ -1377,7 +1385,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
/* Flush all reconfiguration work */
for (pos = 0; pos < efx->vf_count; ++pos) {
- vf = efx->vf + pos;
+ vf = nic_data->vf + pos;
cancel_work_sync(&vf->req);
cancel_work_sync(&vf->reset_work);
}
@@ -1388,7 +1396,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
/* Tear down back-end state */
efx_siena_sriov_vfs_fini(efx);
efx_siena_sriov_free_local(efx);
- kfree(efx->vf);
+ kfree(nic_data->vf);
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
efx_siena_sriov_cmd(efx, false, NULL, NULL);
}
@@ -1396,7 +1404,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned qid, seq, type, data;
qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
@@ -1452,11 +1460,12 @@ error:
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
if (vf_i > efx->vf_init_count)
return;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
netif_info(efx, hw, efx->net_dev,
"FLR on VF %s\n", vf->pci_name);
@@ -1467,21 +1476,23 @@ void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
vf->evq0_count = 0;
}
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
{
struct siena_nic_data *nic_data = efx->nic_data;
struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
if (!efx->vf_init_count)
- return;
+ return 0;
ether_addr_copy(vfdi_status->peers[0].mac_addr,
efx->net_dev->dev_addr);
queue_work(vfdi_workqueue, &nic_data->peer_work);
+
+ return 0;
}
void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
@@ -1500,7 +1511,7 @@ void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned ev_failed, queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
@@ -1525,7 +1536,7 @@ void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
/* Called from napi. Schedule the reset work item */
void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
- struct efx_vf *vf;
+ struct siena_vf *vf;
unsigned int rel;
if (map_vi_index(efx, dmaq, &vf, &rel))
@@ -1541,9 +1552,10 @@ void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
/* Reset all VFs */
void efx_siena_sriov_reset(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
unsigned int vf_i;
struct efx_buffer buf;
- struct efx_vf *vf;
+ struct siena_vf *vf;
ASSERT_RTNL();
@@ -1557,7 +1569,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx)
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
efx_siena_sriov_reset_vf(vf, &buf);
}
@@ -1573,7 +1585,6 @@ int efx_init_sriov(void)
vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
if (!vfdi_workqueue)
return -ENOMEM;
-
return 0;
}
@@ -1582,14 +1593,14 @@ void efx_fini_sriov(void)
destroy_workqueue(vfdi_workqueue);
}
-int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
ether_addr_copy(vf->addr.mac_addr, mac);
@@ -1599,16 +1610,16 @@ int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
return 0;
}
-int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
u16 vlan, u8 qos)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->status_lock);
tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
@@ -1619,16 +1630,16 @@ int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
return 0;
}
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
bool spoofchk)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
int rc;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
mutex_lock(&vf->txq_lock);
if (vf->txq_count == 0) {
@@ -1643,16 +1654,16 @@ int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
return rc;
}
-int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
struct ifla_vf_info *ivi)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_vf *vf;
+ struct siena_nic_data *nic_data = efx->nic_data;
+ struct siena_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
- vf = efx->vf + vf_i;
+ vf = nic_data->vf + vf_i;
ivi->vf = vf_i;
ether_addr_copy(ivi->mac, vf->addr.mac_addr);
@@ -1666,3 +1677,12 @@ int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
return 0;
}
+bool efx_siena_sriov_wanted(struct efx_nic *efx)
+{
+ return efx->vf_count != 0;
+}
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+ return 0;
+}
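/* Illustrative sketch, not part of the patch: the VLAN/QoS pair accepted
 * by efx_siena_sriov_set_vf_vlan() is packed into a single 802.1Q tag
 * control information (TCI) word with the expression used above.
 * VLAN_VID_MASK is 0x0fff and VLAN_PRIO_SHIFT is 13 (<linux/if_vlan.h>).
 */
static u16 example_vf_tci(u16 vlan, u8 qos)
{
	/* e.g. vlan = 100, qos = 3  ->  (3 << 13) | 100 = 0x6064 */
	return (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
}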
diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h
new file mode 100644
index 000000000000..d88d4dab170a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena_sriov.h
@@ -0,0 +1,79 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef SIENA_SRIOV_H
+#define SIENA_SRIOV_H
+
+#include "net_driver.h"
+
+/* On the SFC9000 family each port is associated with 1 PCI physical
+ * function (PF) handled by sfc and a configurable number of virtual
+ * functions (VFs) that may be handled by some other driver, often in
+ * a VM guest. The queue pointer registers are mapped in both PF and
+ * VF BARs such that an 8K region provides access to a single RX, TX
+ * and event queue (collectively a Virtual Interface, VI or VNIC).
+ *
+ * The PF has access to all 1024 VIs while VFs are mapped to VIs
+ * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
+ * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE).
+ * The number of VIs and the VI_SCALE value are configurable but must
+ * be established at boot time by firmware.
+ */
+
+/* Maximum VI_SCALE parameter supported by Siena */
+#define EFX_VI_SCALE_MAX 6
+/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
+ * so this is the smallest allowed value.
+ */
+#define EFX_VI_BASE 128U
+/* Maximum number of VFs allowed */
+#define EFX_VF_COUNT_MAX 127
+/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
+#define EFX_MAX_VF_EVQ_SIZE 8192UL
+/* The number of buffer table entries reserved for each VI on a VF */
+#define EFX_VF_BUFTBL_PER_VI \
+ ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) * \
+ sizeof(efx_qword_t) / EFX_BUF_SIZE)
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_siena_sriov_init(struct efx_nic *efx);
+void efx_siena_sriov_fini(struct efx_nic *efx);
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
+bool efx_siena_sriov_wanted(struct efx_nic *efx);
+void efx_siena_sriov_reset(struct efx_nic *efx);
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
+
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
+ u16 vlan, u8 qos);
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+ bool spoofchk);
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf,
+ struct ifla_vf_info *ivf);
+
+#ifdef CONFIG_SFC_SRIOV
+
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+ return efx->vf_init_count != 0;
+}
+#else /* !CONFIG_SFC_SRIOV */
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+ return false;
+}
+#endif /* CONFIG_SFC_SRIOV */
+
+void efx_siena_sriov_probe(struct efx_nic *efx);
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+
+#endif /* SIENA_SRIOV_H */
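/* Illustrative sketch, not part of the patch: the range expression in the
 * comment above binds as [VI_BASE + (i << VI_SCALE),
 * VI_BASE + ((i + 1) << VI_SCALE)), i.e. each VF owns a power-of-two run
 * of VIs. With VI_SCALE = 2, VF 3 owns VIs 140..143.
 */
static unsigned int example_vf_first_vi(unsigned int vf_i,
					unsigned int vi_scale)
{
	return EFX_VI_BASE + (vf_i << vi_scale);	/* first VI of VF */
}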
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
new file mode 100644
index 000000000000..816c44689e67
--- /dev/null
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -0,0 +1,83 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "sriov.h"
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_mac)
+ return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+ u8 qos)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_vlan) {
+ if ((vlan & ~VLAN_VID_MASK) ||
+ (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
+ return -EINVAL;
+
+ return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
+ } else {
+ return -EOPNOTSUPP;
+ }
+}
+
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_spoofchk)
+ return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_get_vf_config)
+ return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+ int link_state)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_set_vf_link_state)
+ return efx->type->sriov_set_vf_link_state(efx, vf_i,
+ link_state);
+ else
+ return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_get_phys_port_id)
+ return efx->type->sriov_get_phys_port_id(efx, ppid);
+ else
+ return -EOPNOTSUPP;
+}
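/* Illustrative sketch with a hypothetical method: every entry point in
 * this file follows the same dispatch pattern, so the net_device_ops can
 * be wired up unconditionally while only SR-IOV capable NIC types fill in
 * the efx->type callbacks.
 */
static int example_sriov_op(struct efx_nic *efx, int vf_i)
{
	if (efx->type->sriov_example_op)	/* hypothetical member */
		return efx->type->sriov_example_op(efx, vf_i);
	else
		return -EOPNOTSUPP;
}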
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
new file mode 100644
index 000000000000..400df526586d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/sriov.h
@@ -0,0 +1,31 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SRIOV_H
+#define EFX_SRIOV_H
+
+#include "net_driver.h"
+
+#ifdef CONFIG_SFC_SRIOV
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+ u8 qos);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+ bool spoofchk);
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+ struct ifla_vf_info *ivi);
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+ int link_state);
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid);
+
+#endif /* CONFIG_SFC_SRIOV */
+
+#endif /* EFX_SRIOV_H */
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 7d3af190be55..cec147d1d34f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -16,6 +16,7 @@ if STMMAC_ETH
config STMMAC_PLATFORM
tristate "STMMAC Platform bus support"
depends on STMMAC_ETH
+ select MFD_SYSCON
default y
---help---
This selects the platform specific bus support for the stmmac driver.
@@ -26,6 +27,95 @@ config STMMAC_PLATFORM
If unsure, say N.
+if STMMAC_PLATFORM
+
+config DWMAC_GENERIC
+ tristate "Generic driver for DWMAC"
+ default STMMAC_PLATFORM
+ ---help---
+ Generic DWMAC driver for platforms that don't require any
+ platform-specific code to function, or that use platform
+ data for setup.
+
+config DWMAC_IPQ806X
+ tristate "QCA IPQ806x DWMAC support"
+ default ARCH_QCOM
+ depends on OF
+ select MFD_SYSCON
+ help
+ Support for QCA IPQ806X DWMAC Ethernet.
+
+ This selects the IPQ806x SoC glue layer support for the stmmac
+ device driver. This driver does not use any of the hardware
+ acceleration features available on this SoC. Network devices
+ will behave like standard non-accelerated ethernet interfaces.
+
+config DWMAC_LPC18XX
+ tristate "NXP LPC18xx/43xx DWMAC support"
+ default ARCH_LPC18XX
+ depends on OF
+ select MFD_SYSCON
+ ---help---
+ Support for NXP LPC18xx/43xx DWMAC Ethernet.
+
+config DWMAC_MESON
+ tristate "Amlogic Meson dwmac support"
+ default ARCH_MESON
+ depends on OF
+ help
+ Support for Ethernet controller on Amlogic Meson SoCs.
+
+ This selects the Amlogic Meson SoC glue layer support for
+ the stmmac device driver. This driver is used for Meson6 and
+ Meson8 SoCs.
+
+config DWMAC_ROCKCHIP
+ tristate "Rockchip dwmac support"
+ default ARCH_ROCKCHIP
+ depends on OF
+ select MFD_SYSCON
+ help
+ Support for Ethernet controller on Rockchip RK3288 SoC.
+
+ This selects the Rockchip RK3288 SoC glue layer support for
+ the stmmac device driver.
+
+config DWMAC_SOCFPGA
+ tristate "SOCFPGA dwmac support"
+ default ARCH_SOCFPGA
+ depends on OF
+ select MFD_SYSCON
+ help
+ Support for the ethernet controller on Altera SOCFPGA.
+
+ This selects the Altera SOCFPGA SoC glue layer support
+ for the stmmac device driver. This driver is used for
+ arria5 and cyclone5 FPGA SoCs.
+
+config DWMAC_STI
+ tristate "STi GMAC support"
+ default ARCH_STI
+ depends on OF
+ select MFD_SYSCON
+ ---help---
+ Support for ethernet controller on STi SOCs.
+
+ This selects STi SoC glue layer support for the stmmac
+ device driver. This driver is used for the STi series
+ SoCs' GMAC ethernet controller.
+
+config DWMAC_SUNXI
+ tristate "Allwinner GMAC support"
+ default ARCH_SUNXI
+ depends on OF
+ ---help---
+ Support for Allwinner A20/A31 GMAC ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for the A20/A31
+ GMAC ethernet controllers.
+endif
+
config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 73c2715a27f3..b3901616f4f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -4,9 +4,17 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
-obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
-stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o \
- dwmac-sti.o dwmac-socfpga.o dwmac-rk.o
+# Ordering matters. Generic driver must be last.
+obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
+obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
+obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
+obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
+obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
+obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
+obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
+obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
+stmmac-platform-objs:= stmmac_platform.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
new file mode 100644
index 000000000000..e817a1a44379
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -0,0 +1,41 @@
+/*
+ * Generic DWMAC platform driver
+ *
+ * Copyright (C) 2007-2011 STMicroelectronics Ltd
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+static const struct of_device_id dwmac_generic_match[] = {
+ { .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.610"},
+ { .compatible = "snps,dwmac-3.70a"},
+ { .compatible = "snps,dwmac-3.710"},
+ { .compatible = "snps,dwmac"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, dwmac_generic_match);
+
+static struct platform_driver dwmac_generic_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = of_match_ptr(dwmac_generic_match),
+ },
+};
+module_platform_driver(dwmac_generic_driver);
+
+MODULE_DESCRIPTION("Generic dwmac driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
new file mode 100644
index 000000000000..7e3129e7f143
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -0,0 +1,365 @@
+/*
+ * Qualcomm Atheros IPQ806x GMAC glue layer
+ *
+ * Copyright (C) 2015 The Linux Foundation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/stmmac.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+
+#include "stmmac_platform.h"
+
+#define NSS_COMMON_CLK_GATE 0x8
+#define NSS_COMMON_CLK_GATE_PTP_EN(x) BIT(0x10 + x)
+#define NSS_COMMON_CLK_GATE_RGMII_RX_EN(x) BIT(0x9 + (x * 2))
+#define NSS_COMMON_CLK_GATE_RGMII_TX_EN(x) BIT(0x8 + (x * 2))
+#define NSS_COMMON_CLK_GATE_GMII_RX_EN(x) BIT(0x4 + x)
+#define NSS_COMMON_CLK_GATE_GMII_TX_EN(x) BIT(0x0 + x)
+
+#define NSS_COMMON_CLK_DIV0 0xC
+#define NSS_COMMON_CLK_DIV_OFFSET(x) (x * 8)
+#define NSS_COMMON_CLK_DIV_MASK 0x7f
+
+#define NSS_COMMON_CLK_SRC_CTRL 0x14
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x)
+/* Mode is coded on 1 bit but is different depending on the MAC ID:
+ * MAC0: QSGMII=0 RGMII=1
+ * MAC1: QSGMII=0 SGMII=0 RGMII=1
+ * MAC2 & MAC3: QSGMII=0 SGMII=1
+ */
+#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x) 1
+#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x) ((x >= 2) ? 1 : 0)
+
+#define NSS_COMMON_MACSEC_CTL 0x28
+#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x) (1 << x)
+
+#define NSS_COMMON_GMAC_CTL(x) (0x30 + (x * 4))
+#define NSS_COMMON_GMAC_CTL_CSYS_REQ BIT(19)
+#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL BIT(16)
+#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET 8
+#define NSS_COMMON_GMAC_CTL_IFG_OFFSET 0
+#define NSS_COMMON_GMAC_CTL_IFG_MASK 0x3f
+
+#define NSS_COMMON_CLK_DIV_RGMII_1000 1
+#define NSS_COMMON_CLK_DIV_RGMII_100 9
+#define NSS_COMMON_CLK_DIV_RGMII_10 99
+#define NSS_COMMON_CLK_DIV_SGMII_1000 0
+#define NSS_COMMON_CLK_DIV_SGMII_100 4
+#define NSS_COMMON_CLK_DIV_SGMII_10 49
+
+#define QSGMII_PCS_MODE_CTL 0x68
+#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x) BIT((x * 8) + 7)
+
+#define QSGMII_PCS_CAL_LCKDT_CTL 0x120
+#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19)
+
+/* Only GMAC1/2/3 support SGMII and their CTL register are not contiguous */
+#define QSGMII_PHY_SGMII_CTL(x) ((x == 1) ? 0x134 : \
+ (0x13c + (4 * (x - 2))))
+#define QSGMII_PHY_CDR_EN BIT(0)
+#define QSGMII_PHY_RX_FRONT_EN BIT(1)
+#define QSGMII_PHY_RX_SIGNAL_DETECT_EN BIT(2)
+#define QSGMII_PHY_TX_DRIVER_EN BIT(3)
+#define QSGMII_PHY_QSGMII_EN BIT(7)
+#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12
+#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK 0x7
+#define QSGMII_PHY_RX_DC_BIAS_OFFSET 18
+#define QSGMII_PHY_RX_DC_BIAS_MASK 0x3
+#define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20
+#define QSGMII_PHY_RX_INPUT_EQU_MASK 0x3
+#define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22
+#define QSGMII_PHY_CDR_PI_SLEW_MASK 0x3
+#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28
+#define QSGMII_PHY_TX_DRV_AMP_MASK 0xf
+
+struct ipq806x_gmac {
+ struct platform_device *pdev;
+ struct regmap *nss_common;
+ struct regmap *qsgmii_csr;
+ uint32_t id;
+ struct clk *core_clk;
+ phy_interface_t phy_mode;
+};
+
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ struct device *dev = &gmac->pdev->dev;
+ int div;
+
+ switch (speed) {
+ case SPEED_1000:
+ div = NSS_COMMON_CLK_DIV_SGMII_1000;
+ break;
+
+ case SPEED_100:
+ div = NSS_COMMON_CLK_DIV_SGMII_100;
+ break;
+
+ case SPEED_10:
+ div = NSS_COMMON_CLK_DIV_SGMII_10;
+ break;
+
+ default:
+ dev_err(dev, "Speed %dMbps not supported in SGMII\n", speed);
+ return -EINVAL;
+ }
+
+ return div;
+}
+
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ struct device *dev = &gmac->pdev->dev;
+ int div;
+
+ switch (speed) {
+ case SPEED_1000:
+ div = NSS_COMMON_CLK_DIV_RGMII_1000;
+ break;
+
+ case SPEED_100:
+ div = NSS_COMMON_CLK_DIV_RGMII_100;
+ break;
+
+ case SPEED_10:
+ div = NSS_COMMON_CLK_DIV_RGMII_10;
+ break;
+
+ default:
+ dev_err(dev, "Speed %dMbps not supported in RGMII\n", speed);
+ return -EINVAL;
+ }
+
+ return div;
+}
+
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ uint32_t clk_bits, val;
+ int div;
+
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ div = get_clk_div_rgmii(gmac, speed);
+ clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ div = get_clk_div_sgmii(gmac, speed);
+ clk_bits = NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+ break;
+
+ default:
+ dev_err(&gmac->pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return -EINVAL;
+ }
+
+ /* Disable the clocks */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val &= ~clk_bits;
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ /* Set the divider */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_DIV0, &val);
+ val &= ~(NSS_COMMON_CLK_DIV_MASK
+ << NSS_COMMON_CLK_DIV_OFFSET(gmac->id));
+ val |= div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id);
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_DIV0, val);
+
+ /* Re-enable the clocks */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val |= clk_bits;
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ return 0;
+}
+
+static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+{
+ struct device *dev = &gmac->pdev->dev;
+
+ gmac->phy_mode = of_get_phy_mode(dev->of_node);
+ if (gmac->phy_mode < 0) {
+ dev_err(dev, "missing phy mode property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
+ dev_err(dev, "missing qcom id property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* The GMACs are called 1 to 4 in the documentation, but to simplify the
+ * code and keep it consistent with the Linux convention, we'll number
+ * them from 0 to 3 here.
+ */
+ if (gmac->id < 0 || gmac->id > 3) {
+ dev_err(dev, "invalid gmac id\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ gmac->core_clk = devm_clk_get(dev, "stmmaceth");
+ if (IS_ERR(gmac->core_clk)) {
+ dev_err(dev, "missing stmmaceth clk property\n");
+ return gmac->core_clk;
+ }
+ clk_set_rate(gmac->core_clk, 266000000);
+
+ /* Setup the register map for the nss common registers */
+ gmac->nss_common = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "qcom,nss-common");
+ if (IS_ERR(gmac->nss_common)) {
+ dev_err(dev, "missing nss-common node\n");
+ return gmac->nss_common;
+ }
+
+ /* Setup the register map for the qsgmii csr registers */
+ gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "qcom,qsgmii-csr");
+ if (IS_ERR(gmac->qsgmii_csr)) {
+ dev_err(dev, "missing qsgmii-csr node\n");
+ return gmac->qsgmii_csr;
+ }
+
+ return NULL;
+}
+
+static void *ipq806x_gmac_setup(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipq806x_gmac *gmac;
+ int val;
+ void *err;
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return ERR_PTR(-ENOMEM);
+
+ gmac->pdev = pdev;
+
+ err = ipq806x_gmac_of_parse(gmac);
+ if (err) {
+ dev_err(dev, "device tree parsing error\n");
+ return err;
+ }
+
+ regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
+ QSGMII_PCS_CAL_LCKDT_CTL_RST);
+
+ /* Inter frame gap is set to 12 */
+ val = 12 << NSS_COMMON_GMAC_CTL_IFG_OFFSET |
+ 12 << NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET;
+ /* We also initiate an AXI low power exit request */
+ val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return NULL;
+ }
+ regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
+
+ /* Configure the clock src according to the mode */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
+ val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
+ NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val |= NSS_COMMON_CLK_SRC_CTRL_SGMII(gmac->id) <<
+ NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return NULL;
+ }
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
+
+ /* Enable PTP clock */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
+ QSGMII_PHY_CDR_EN |
+ QSGMII_PHY_RX_FRONT_EN |
+ QSGMII_PHY_RX_SIGNAL_DETECT_EN |
+ QSGMII_PHY_TX_DRIVER_EN |
+ QSGMII_PHY_QSGMII_EN |
+ 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
+ 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
+ 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
+ 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
+ 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+ }
+
+ return gmac;
+}
+
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct ipq806x_gmac *gmac = priv;
+
+ ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static const struct stmmac_of_data ipq806x_gmac_data = {
+ .has_gmac = 1,
+ .setup = ipq806x_gmac_setup,
+ .fix_mac_speed = ipq806x_gmac_fix_mac_speed,
+};
+
+static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
+ { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
+
+static struct platform_driver ipq806x_gmac_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "ipq806x-gmac-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = ipq806x_gmac_dwmac_match,
+ },
+};
+module_platform_driver(ipq806x_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Mathieu Olivari <mathieu@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Atheros IPQ806x DWMAC specific glue layer");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
new file mode 100644
index 000000000000..cb888d3ebbdc
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -0,0 +1,99 @@
+/*
+ * DWMAC glue for NXP LPC18xx/LPC43xx Ethernet
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+/* Register defines for CREG syscon */
+#define LPC18XX_CREG_CREG6 0x12c
+# define LPC18XX_CREG_CREG6_ETHMODE_MASK 0x7
+# define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0
+# define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4
+
+struct lpc18xx_dwmac_priv_data {
+ struct regmap *reg;
+ int interface;
+};
+
+static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
+{
+ struct lpc18xx_dwmac_priv_data *dwmac;
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return ERR_PTR(-ENOMEM);
+
+ dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
+ if (dwmac->interface < 0)
+ return ERR_PTR(dwmac->interface);
+
+ dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+ if (IS_ERR(dwmac->reg)) {
+ dev_err(&pdev->dev, "Syscon lookup failed\n");
+ return dwmac->reg;
+ }
+
+ return dwmac;
+}
+
+static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct lpc18xx_dwmac_priv_data *dwmac = priv;
+ u8 ethmode;
+
+ if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+ ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
+ } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
+ } else {
+ dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+ LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
+
+ return 0;
+}
+
+static const struct stmmac_of_data lpc18xx_dwmac_data = {
+ .has_gmac = 1,
+ .setup = lpc18xx_dwmac_setup,
+ .init = lpc18xx_dwmac_init,
+};
+
+static const struct of_device_id lpc18xx_dwmac_match[] = {
+ { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
+
+static struct platform_driver lpc18xx_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "lpc18xx-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = lpc18xx_dwmac_match,
+ },
+};
+module_platform_driver(lpc18xx_dwmac_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("DWMAC glue for LPC18xx/43xx Ethernet");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index cca028d632f6..61a324a87d09 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -15,6 +15,7 @@
#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/stmmac.h>
@@ -63,7 +64,28 @@ static void *meson6_dwmac_setup(struct platform_device *pdev)
return dwmac;
}
-const struct stmmac_of_data meson6_dwmac_data = {
+static const struct stmmac_of_data meson6_dwmac_data = {
.setup = meson6_dwmac_setup,
.fix_mac_speed = meson6_dwmac_fix_mac_speed,
};
+
+static const struct of_device_id meson6_dwmac_match[] = {
+ { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
+
+static struct platform_driver meson6_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "meson6-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = meson6_dwmac_match,
+ },
+};
+module_platform_driver(meson6_dwmac_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("Amlogic Meson DWMAC glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6249a4ec08f0..30e28f0d9a60 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -22,13 +22,17 @@
#include <linux/phy.h>
#include <linux/of_net.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "stmmac_platform.h"
+
struct rk_priv_data {
struct platform_device *pdev;
int phy_iface;
@@ -428,10 +432,31 @@ static void rk_fix_speed(void *priv, unsigned int speed)
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
}
-const struct stmmac_of_data rk3288_gmac_data = {
+static const struct stmmac_of_data rk3288_gmac_data = {
.has_gmac = 1,
.fix_mac_speed = rk_fix_speed,
.setup = rk_gmac_setup,
.init = rk_gmac_init,
.exit = rk_gmac_exit,
};
+
+static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
+
+static struct platform_driver rk_gmac_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "rk_gmac-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = rk_gmac_dwmac_match,
+ },
+};
+module_platform_driver(rk_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip RK3288 DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 5a36bd2c7837..8141c5b844ae 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -257,9 +257,28 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
return ret;
}
-const struct stmmac_of_data socfpga_gmac_data = {
+static const struct stmmac_of_data socfpga_gmac_data = {
.setup = socfpga_dwmac_probe,
.init = socfpga_dwmac_init,
.exit = socfpga_dwmac_exit,
.fix_mac_speed = socfpga_dwmac_fix_mac_speed,
};
+
+static const struct of_device_id socfpga_dwmac_match[] = {
+ { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
+
+static struct platform_driver socfpga_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "socfpga-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = socfpga_dwmac_match,
+ },
+};
+module_platform_driver(socfpga_dwmac_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index bb6e2dc61bec..a2e8111c5d14 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -17,6 +17,7 @@
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -351,16 +352,40 @@ static void *sti_dwmac_setup(struct platform_device *pdev)
return dwmac;
}
-const struct stmmac_of_data stih4xx_dwmac_data = {
+static const struct stmmac_of_data stih4xx_dwmac_data = {
.fix_mac_speed = stih4xx_fix_retime_src,
.setup = sti_dwmac_setup,
.init = stix4xx_init,
.exit = sti_dwmac_exit,
};
-const struct stmmac_of_data stid127_dwmac_data = {
+static const struct stmmac_of_data stid127_dwmac_data = {
.fix_mac_speed = stid127_fix_retime_src,
.setup = sti_dwmac_setup,
.init = stid127_init,
.exit = sti_dwmac_exit,
};
+
+static const struct of_device_id sti_dwmac_match[] = {
+ { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
+ { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
+ { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
+ { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sti_dwmac_match);
+
+static struct platform_driver sti_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sti-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sti_dwmac_match,
+ },
+};
+module_platform_driver(sti_dwmac_driver);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index c5ea9ab75b03..15048ca39759 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -18,7 +18,9 @@
#include <linux/stmmac.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/platform_device.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
@@ -132,7 +134,7 @@ static void sun7i_fix_speed(void *priv, unsigned int speed)
/* of_data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers. */
-const struct stmmac_of_data sun7i_gmac_data = {
+static const struct stmmac_of_data sun7i_gmac_data = {
.has_gmac = 1,
.tx_coe = 1,
.fix_mac_speed = sun7i_fix_speed,
@@ -140,3 +142,24 @@ const struct stmmac_of_data sun7i_gmac_data = {
.init = sun7i_gmac_init,
.exit = sun7i_gmac_exit,
};
+
+static const struct of_device_id sun7i_dwmac_match[] = {
+ { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
+
+static struct platform_driver sun7i_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sun7i-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sun7i_dwmac_match,
+ },
+};
+module_platform_driver(sun7i_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sunxi DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 73bab983edd9..1f3b33a6c6a8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -34,6 +34,14 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/reset.h>
+struct stmmac_resources {
+ void __iomem *addr;
+ const char *mac;
+ int wol_irq;
+ int lpi_irq;
+ int irq;
+};
+
struct stmmac_tx_info {
dma_addr_t buf;
bool map_as_page;
@@ -135,9 +143,9 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_resume(struct net_device *ndev);
int stmmac_suspend(struct net_device *ndev);
int stmmac_dvr_remove(struct net_device *ndev);
-struct stmmac_priv *stmmac_dvr_probe(struct device *device,
- struct plat_stmmacenet_data *plat_dat,
- void __iomem *addr);
+int stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res);
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
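/* Illustrative sketch, not part of the patch: under the new contract a
 * bus glue driver gathers its resources on the stack and hands them to
 * stmmac_dvr_probe(), which now owns netdev allocation and registration.
 * Names and values here are placeholders.
 */
static int example_glue_probe(struct device *dev,
			      struct plat_stmmacenet_data *plat,
			      void __iomem *base, int irq)
{
	struct stmmac_resources res;

	memset(&res, 0, sizeof(res));
	res.addr = base;
	res.irq = irq;
	res.wol_irq = irq;	/* no dedicated WoL line: reuse MAC IRQ */

	return stmmac_dvr_probe(dev, plat, &res);
}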
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2c5ce2baca87..a5156739e1e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -52,6 +52,7 @@
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
+#include <linux/of_mdio.h>
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
@@ -816,18 +817,25 @@ static int stmmac_init_phy(struct net_device *dev)
priv->speed = 0;
priv->oldduplex = -1;
- if (priv->plat->phy_bus_name)
- snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
- priv->plat->phy_bus_name, priv->plat->bus_id);
- else
- snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
- priv->plat->bus_id);
+ if (priv->plat->phy_node) {
+ phydev = of_phy_connect(dev, priv->plat->phy_node,
+ &stmmac_adjust_link, 0, interface);
+ } else {
+ if (priv->plat->phy_bus_name)
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+ priv->plat->phy_bus_name, priv->plat->bus_id);
+ else
+ snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+ priv->plat->bus_id);
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
- priv->plat->phy_addr);
- pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->plat->phy_addr);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n",
+ phy_id_fmt);
- phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
+ interface);
+ }
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -848,7 +856,7 @@ static int stmmac_init_phy(struct net_device *dev)
* device as well.
* Note: phydev->phy_id is the result of reading the UID PHY registers.
*/
- if (phydev->phy_id == 0) {
+ if (!priv->plat->phy_node && phydev->phy_id == 0) {
phy_disconnect(phydev);
return -ENODEV;
}
@@ -975,13 +983,11 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
{
struct sk_buff *skb;
- skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
- flags);
+ skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
if (!skb) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
return -ENOMEM;
}
- skb_reserve(skb, NET_IP_ALIGN);
priv->rx_skbuff[i] = skb;
priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
priv->dma_buf_sz,
@@ -2800,16 +2806,15 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
* stmmac_dvr_probe
* @device: device pointer
* @plat_dat: platform data pointer
- * @addr: iobase memory address
+ * @res: stmmac resource pointer
* Description: this is the main probe function used to
* call the alloc_etherdev, allocate the priv structure.
* Return:
- * on success the new private structure is returned, otherwise the error
- * pointer.
+ * returns 0 on success, otherwise errno.
*/
-struct stmmac_priv *stmmac_dvr_probe(struct device *device,
- struct plat_stmmacenet_data *plat_dat,
- void __iomem *addr)
+int stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
{
int ret = 0;
struct net_device *ndev = NULL;
@@ -2817,7 +2822,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
ndev = alloc_etherdev(sizeof(struct stmmac_priv));
if (!ndev)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
SET_NETDEV_DEV(ndev, device);
@@ -2828,8 +2833,17 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
priv->plat = plat_dat;
- priv->ioaddr = addr;
- priv->dev->base_addr = (unsigned long)addr;
+ priv->ioaddr = res->addr;
+ priv->dev->base_addr = (unsigned long)res->addr;
+
+ priv->dev->irq = res->irq;
+ priv->wol_irq = res->wol_irq;
+ priv->lpi_irq = res->lpi_irq;
+
+ if (res->mac)
+ memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
+
+ dev_set_drvdata(device, priv);
/* Verify driver arguments */
stmmac_verify_args();
@@ -2944,7 +2958,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
}
}
- return priv;
+ return 0;
error_mdio_register:
unregister_netdev(ndev);
@@ -2957,7 +2971,7 @@ error_pclk_get:
error_clk_get:
free_netdev(ndev);
- return ERR_PTR(ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
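/* Roughly what __netdev_alloc_skb_ip_align() does (see <linux/skbuff.h>),
 * replacing the open-coded allocation above: pad the request by
 * NET_IP_ALIGN and reserve it so the IP header of received frames lands
 * on a naturally aligned boundary.
 */
static inline struct sk_buff *
sketch_alloc_skb_ip_align(struct net_device *dev, unsigned int len,
			  gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, len + NET_IP_ALIGN, gfp);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}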
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 3bca908716e2..d71a721ea61c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -163,7 +163,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
{
struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
struct plat_stmmacenet_data *plat;
- struct stmmac_priv *priv;
+ struct stmmac_resources res;
int i;
int ret;
@@ -214,19 +214,12 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
pci_enable_msi(pdev);
- priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]);
- if (IS_ERR(priv)) {
- dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__);
- return PTR_ERR(priv);
- }
- priv->dev->irq = pdev->irq;
- priv->wol_irq = pdev->irq;
-
- pci_set_drvdata(pdev, priv->dev);
+ memset(&res, 0, sizeof(res));
+ res.addr = pcim_iomap_table(pdev)[i];
+ res.wol_irq = pdev->irq;
+ res.irq = pdev->irq;
- dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n");
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 68aec5c460db..f3918c7e7eeb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -28,29 +28,11 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include "stmmac.h"
#include "stmmac_platform.h"
-static const struct of_device_id stmmac_dt_ids[] = {
- /* SoC specific glue layers should come before generic bindings */
- { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
- { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
- { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
- { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
- { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
- { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
- { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
- { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
- { .compatible = "st,spear600-gmac"},
- { .compatible = "snps,dwmac-3.610"},
- { .compatible = "snps,dwmac-3.70a"},
- { .compatible = "snps,dwmac-3.710"},
- { .compatible = "snps,dwmac"},
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
-
#ifdef CONFIG_OF
/**
@@ -129,14 +111,9 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
struct stmmac_dma_cfg *dma_cfg;
const struct of_device_id *device;
+ struct device *dev = &pdev->dev;
- if (!np)
- return -ENODEV;
-
- device = of_match_device(stmmac_dt_ids, &pdev->dev);
- if (!device)
- return -ENODEV;
-
+ device = of_match_device(dev->driver->of_match_table, dev);
if (device->data) {
const struct stmmac_of_data *data = device->data;
plat->has_gmac = data->has_gmac;
@@ -168,13 +145,24 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
/* Default to phy auto-detection */
plat->phy_addr = -1;
+ /* If we find a phy-handle property, use it as the PHY */
+ plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* If phy-handle is not specified, check if we have a fixed-phy */
+ if (!plat->phy_node && of_phy_is_fixed_link(np)) {
+ if ((of_phy_register_fixed_link(np) < 0))
+ return -ENODEV;
+
+ plat->phy_node = of_node_get(np);
+ }
+
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
*/
if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
- if (plat->phy_bus_name)
+ if (plat->phy_node || plat->phy_bus_name)
plat->mdio_bus_data = NULL;
else
plat->mdio_bus_data =
@@ -232,8 +220,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
if (of_find_property(np, "snps,pbl", NULL)) {
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
- if (!dma_cfg)
+ if (!dma_cfg) {
+ of_node_put(np);
return -ENOMEM;
+ }
plat->dma_cfg = dma_cfg;
of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
dma_cfg->fixed_burst =
@@ -268,27 +258,26 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
* the necessary platform resources, invoke custom helper (if required) and
* invoke the main probe function.
*/
-static int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_pltfr_probe(struct platform_device *pdev)
{
+ struct stmmac_resources stmmac_res;
int ret = 0;
struct resource *res;
struct device *dev = &pdev->dev;
- void __iomem *addr = NULL;
- struct stmmac_priv *priv = NULL;
struct plat_stmmacenet_data *plat_dat = NULL;
- const char *mac = NULL;
- int irq, wol_irq, lpi_irq;
+
+ memset(&stmmac_res, 0, sizeof(stmmac_res));
/* Get IRQ information early to have an ability to ask for deferred
* probe if needed before we went too far with resource allocation.
*/
- irq = platform_get_irq_byname(pdev, "macirq");
- if (irq < 0) {
- if (irq != -EPROBE_DEFER) {
+ stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
+ if (stmmac_res.irq < 0) {
+ if (stmmac_res.irq != -EPROBE_DEFER) {
dev_err(dev,
"MAC IRQ configuration information not found\n");
}
- return irq;
+ return stmmac_res.irq;
}
/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
@@ -298,21 +287,21 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
* In case the wake up interrupt is not passed from the platform
* so the driver will continue to use the mac irq (ndev->irq)
*/
- wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (wol_irq < 0) {
- if (wol_irq == -EPROBE_DEFER)
+ stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (stmmac_res.wol_irq < 0) {
+ if (stmmac_res.wol_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
- wol_irq = irq;
+ stmmac_res.wol_irq = stmmac_res.irq;
}
- lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
- if (lpi_irq == -EPROBE_DEFER)
+ stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+ if (stmmac_res.lpi_irq == -EPROBE_DEFER)
return -EPROBE_DEFER;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
+ stmmac_res.addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(stmmac_res.addr))
+ return PTR_ERR(stmmac_res.addr);
plat_dat = dev_get_platdata(&pdev->dev);
@@ -332,7 +321,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
plat_dat->unicast_filter_entries = 1;
if (pdev->dev.of_node) {
- ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
+ ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
if (ret) {
pr_err("%s: main dt probe failed", __func__);
return ret;
@@ -353,27 +342,9 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
return ret;
}
- priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
- if (IS_ERR(priv)) {
- pr_err("%s: main driver probe failed", __func__);
- return PTR_ERR(priv);
- }
-
- /* Copy IRQ values to priv structure which is now avaialble */
- priv->dev->irq = irq;
- priv->wol_irq = wol_irq;
- priv->lpi_irq = lpi_irq;
-
- /* Get MAC address if available (DT) */
- if (mac)
- memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
-
- platform_set_drvdata(pdev, priv->dev);
-
- pr_debug("STMMAC platform driver registration completed");
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
/**
* stmmac_pltfr_remove
@@ -381,7 +352,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
* Description: this function calls the main to free the net resources
* and calls the platforms hook and release the resources (e.g. mem).
*/
-static int stmmac_pltfr_remove(struct platform_device *pdev)
+int stmmac_pltfr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -395,6 +366,7 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
return ret;
}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
#ifdef CONFIG_PM_SLEEP
/**
@@ -438,21 +410,6 @@ static int stmmac_pltfr_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
- stmmac_pltfr_suspend, stmmac_pltfr_resume);
-
-static struct platform_driver stmmac_pltfr_driver = {
- .probe = stmmac_pltfr_probe,
- .remove = stmmac_pltfr_remove,
- .driver = {
- .name = STMMAC_RESOURCE_NAME,
- .pm = &stmmac_pltfr_pm_ops,
- .of_match_table = of_match_ptr(stmmac_dt_ids),
- },
-};
-
-module_platform_driver(stmmac_pltfr_driver);
-
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
-MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
-MODULE_LICENSE("GPL");
+SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
+ stmmac_pltfr_resume);
+EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 093eb99e5ffd..71da86d7bd00 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -19,11 +19,8 @@
#ifndef __STMMAC_PLATFORM_H__
#define __STMMAC_PLATFORM_H__
-extern const struct stmmac_of_data meson6_dwmac_data;
-extern const struct stmmac_of_data sun7i_gmac_data;
-extern const struct stmmac_of_data stih4xx_dwmac_data;
-extern const struct stmmac_of_data stid127_dwmac_data;
-extern const struct stmmac_of_data socfpga_gmac_data;
-extern const struct stmmac_of_data rk3288_gmac_data;
+int stmmac_pltfr_probe(struct platform_device *pdev);
+int stmmac_pltfr_remove(struct platform_device *pdev);
+extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
#endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b536b4c82752..462820514fae 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1361,7 +1361,6 @@ static int cpsw_ndo_stop(struct net_device *ndev)
if (cpsw_common_res_usage_state(priv) <= 1) {
cpts_unregister(priv->cpts);
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_ctlr_stop(priv->dma);
cpsw_ale_stop(priv->ale);
}
@@ -1456,7 +1455,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
if (priv->cpts->rx_enable)
ctrl |= CTRL_V2_RX_TS_BITS;
- break;
+ break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
@@ -1466,7 +1465,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
if (priv->cpts->rx_enable)
ctrl |= CTRL_V3_RX_TS_BITS;
- break;
+ break;
}
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -1589,10 +1588,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
ndev->stats.tx_errors++;
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_chan_stop(priv->txch);
cpdma_chan_start(priv->txch);
- cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
}
@@ -1629,10 +1626,8 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpsw_rx_interrupt(priv->irqs_table[0], priv);
cpsw_tx_interrupt(priv->irqs_table[1], priv);
- cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
}
#endif
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 6e927b4583aa..43b061bd8e07 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -268,39 +268,6 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
}
EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
-static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
- int port_mask)
-{
- int port;
-
- port = cpsw_ale_get_port_num(ale_entry);
- if ((BIT(port) & port_mask) == 0)
- return; /* ports dont intersect, not interested */
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
-}
-
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
-{
- u32 ale_entry[ALE_ENTRY_WORDS];
- int ret, idx;
-
- for (idx = 0; idx < ale->params.ale_entries; idx++) {
- cpsw_ale_read(ale, idx, ale_entry);
- ret = cpsw_ale_get_entry_type(ale_entry);
- if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
- continue;
-
- if (cpsw_ale_get_mcast(ale_entry))
- cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
- else
- cpsw_ale_flush_ucast(ale, ale_entry, port_mask);
-
- cpsw_ale_write(ale, idx, ale_entry);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_flush);
-
static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
int flags, u16 vid)
{
@@ -752,18 +719,6 @@ static void cpsw_ale_timer(unsigned long arg)
}
}
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
-{
- del_timer_sync(&ale->timer);
- ale->ageout = ageout * HZ;
- if (ale->ageout) {
- ale->timer.expires = jiffies + ale->ageout;
- add_timer(&ale->timer);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_set_ageout);
-
void cpsw_ale_start(struct cpsw_ale *ale)
{
u32 rev;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index af1e7ecd87c6..a7001894f3da 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -90,8 +90,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale);
void cpsw_ale_start(struct cpsw_ale *ale);
void cpsw_ale_stop(struct cpsw_ale *ale);
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
int flags, u16 vid);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 43efc3a0cda5..5ec4ed3f6c8d 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -537,7 +537,7 @@ int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
static void netcp_frag_free(bool is_frag, void *ptr)
{
if (is_frag)
- put_page(virt_to_head_page(ptr));
+ skb_free_frag(ptr);
else
kfree(ptr);
}
@@ -698,7 +698,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
}
}
- netcp->ndev->last_rx = jiffies;
netcp->ndev->stats.rx_packets++;
netcp->ndev->stats.rx_bytes += skb->len;
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 3d8f60d9643e..6f0a4495c7f3 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -721,9 +721,6 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
if (!hash_default)
__inv_buffer(buf, len);
- /* ISSUE: Is this needed? */
- dev->last_rx = jiffies;
-
#ifdef TILE_NET_DUMP_PACKETS
dump_packet(buf, len, "rx");
#endif /* TILE_NET_DUMP_PACKETS */
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index ac62a5e248b0..79f0ec4e51ac 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -102,6 +102,18 @@ static void gelic_card_get_ether_port_status(struct gelic_card *card,
}
}
+/**
+ * gelic_descr_get_status -- returns the status of a descriptor
+ * @descr: descriptor to look at
+ *
+ * returns the status as in the dmac_cmd_status field of the descriptor
+ */
+static enum gelic_descr_dma_status
+gelic_descr_get_status(struct gelic_descr *descr)
+{
+ return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
+}
+
static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
{
int status;
@@ -278,18 +290,6 @@ void gelic_card_down(struct gelic_card *card)
}
/**
- * gelic_descr_get_status -- returns the status of a descriptor
- * @descr: descriptor to look at
- *
- * returns the status as in the dmac_cmd_status field of the descriptor
- */
-static enum gelic_descr_dma_status
-gelic_descr_get_status(struct gelic_descr *descr)
-{
- return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
-}
-
-/**
* gelic_card_free_chain - free descriptor chain
* @card: card structure
* @descr_in: address of desc
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 8e9371a3388a..3c54a2cae5df 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -604,8 +604,7 @@ spider_net_set_multi(struct net_device *netdev)
int i;
u32 reg;
struct spider_net_card *card = netdev_priv(netdev);
- unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
- {0, };
+ DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};
spider_net_set_promisc(card);
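DECLARE_BITMAP() hides the BITS_PER_LONG arithmetic the removed declaration spelled out by hand; a sketch of the expansion (macro from <linux/types.h>):

/* DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) expands to: */
unsigned long bitmask[BITS_TO_LONGS(SPIDER_NET_MULTICAST_HASHES)];
/* and the = {} initializer zeroes every word, matching the old {0, }. */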
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index de2850497c09..725106f75d42 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -472,8 +472,7 @@ struct rhine_private {
/* Frequently used values: keep some adjacent for cache effect. */
u32 quirks;
- struct rx_desc *rx_head_desc;
- unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int cur_rx;
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
struct rhine_stats rx_stats;
@@ -1213,17 +1212,61 @@ static void free_ring(struct net_device* dev)
}
-static void alloc_rbufs(struct net_device *dev)
+struct rhine_skb_dma {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+};
+
+static inline int rhine_skb_dma_init(struct net_device *dev,
+ struct rhine_skb_dma *sd)
{
struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent;
- dma_addr_t next;
+ const int size = rp->rx_buf_sz;
+
+ sd->skb = netdev_alloc_skb(dev, size);
+ if (!sd->skb)
+ return -ENOMEM;
+
+ sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
+ netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
+ dev_kfree_skb_any(sd->skb);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void rhine_reset_rbufs(struct rhine_private *rp)
+{
int i;
- rp->dirty_rx = rp->cur_rx = 0;
+ rp->cur_rx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++)
+ rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+}
+
+static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
+ struct rhine_skb_dma *sd, int entry)
+{
+ rp->rx_skbuff_dma[entry] = sd->dma;
+ rp->rx_skbuff[entry] = sd->skb;
+
+ rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
+ dma_wmb();
+}
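The dma_wmb() here is half of a publish protocol: every field the NIC may read must be visible before ownership transfers. A minimal sketch of the full sequence as this patch arranges it (DescOwn is set later, in rhine_reset_rbufs() or at the end of rhine_rx()):

/* Publish a receive buffer to the NIC, sketched: */
desc->addr = cpu_to_le32(mapping);	/* 1: buffer address */
dma_wmb();				/* 2: order step 1 before step 3 */
desc->rx_status = cpu_to_le32(DescOwn);	/* 3: hand descriptor to the NIC */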
+
+static void free_rbufs(struct net_device* dev);
+
+static int alloc_rbufs(struct net_device *dev)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ dma_addr_t next;
+ int rc, i;
rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
- rp->rx_head_desc = &rp->rx_ring[0];
next = rp->rx_ring_dma;
/* Init the ring entries */
@@ -1239,23 +1282,20 @@ static void alloc_rbufs(struct net_device *dev)
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
- rp->rx_skbuff[i] = skb;
- if (skb == NULL)
- break;
+ struct rhine_skb_dma sd;
- rp->rx_skbuff_dma[i] =
- dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
- rp->rx_skbuff_dma[i] = 0;
- dev_kfree_skb(skb);
- break;
+ rc = rhine_skb_dma_init(dev, &sd);
+ if (rc < 0) {
+ free_rbufs(dev);
+ goto out;
}
- rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
- rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+
+ rhine_skb_dma_nic_store(rp, &sd, i);
}
- rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ rhine_reset_rbufs(rp);
+out:
+ return rc;
}
static void free_rbufs(struct net_device* dev)
@@ -1659,16 +1699,18 @@ static int rhine_open(struct net_device *dev)
rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
if (rc)
- return rc;
+ goto out;
netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
rc = alloc_ring(dev);
- if (rc) {
- free_irq(rp->irq, dev);
- return rc;
- }
- alloc_rbufs(dev);
+ if (rc < 0)
+ goto out_free_irq;
+
+ rc = alloc_rbufs(dev);
+ if (rc < 0)
+ goto out_free_ring;
+
alloc_tbufs(dev);
rhine_chip_reset(dev);
rhine_task_enable(rp);
@@ -1680,7 +1722,14 @@ static int rhine_open(struct net_device *dev)
netif_start_queue(dev);
- return 0;
+out:
+ return rc;
+
+out_free_ring:
+ free_ring(dev);
+out_free_irq:
+ free_irq(rp->irq, dev);
+ goto out;
}
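The reworked open path follows the usual kernel unwind idiom: resources are acquired in order, and each failure label releases exactly what was already acquired, in reverse. Sketched:

/* Acquisition order and matching unwind in rhine_open():
 *
 *   request_irq()  -- failure: goto out (nothing to undo)
 *   alloc_ring()   -- failure: goto out_free_irq
 *   alloc_rbufs()  -- failure: goto out_free_ring (then falls to free_irq)
 */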
static void rhine_reset_task(struct work_struct *work)
@@ -1700,9 +1749,9 @@ static void rhine_reset_task(struct work_struct *work)
/* clear all descriptors */
free_tbufs(dev);
- free_rbufs(dev);
alloc_tbufs(dev);
- alloc_rbufs(dev);
+
+ rhine_reset_rbufs(rp);
/* Reinitialize the hardware. */
rhine_chip_reset(dev);
@@ -1730,6 +1779,11 @@ static void rhine_tx_timeout(struct net_device *dev)
schedule_work(&rp->reset_task);
}
+static inline bool rhine_tx_queue_full(struct rhine_private *rp)
+{
+ return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
+}
+
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1800,11 +1854,17 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
netdev_sent_queue(dev, skb->len);
/* lock eth irq */
- wmb();
+ dma_wmb();
rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
wmb();
rp->cur_tx++;
+ /*
+ * Nobody wants the cur_tx write to rot for ages once the NIC has
+ * seen the transmit request, especially as the transmit completion
+ * handler could miss it.
+ */
+ smp_wmb();
/* Non-x86 Todo: explicitly flush cache lines here. */
@@ -1817,8 +1877,14 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
ioaddr + ChipCmd1);
IOSYNC;
- if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
+ /* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
+ if (rhine_tx_queue_full(rp)) {
netif_stop_queue(dev);
+ smp_rmb();
+ /* Rejuvenate. */
+ if (!rhine_tx_queue_full(rp))
+ netif_wake_queue(dev);
+ }
netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
rp->cur_tx - 1, entry);
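The new smp_wmb()/smp_rmb() pairs implement a lockless handshake between the transmit producer and the completion consumer, and the stop-recheck-wake dance closes the window where one side's stale view would leave the queue stopped forever. Sketched pairing (names from this patch; illustration only):

/* Producer (rhine_start_tx)           Consumer (rhine_tx)
 *
 *   rp->cur_tx++;
 *   smp_wmb();              pairs     smp_rmb();
 *                           with      cur_tx = rp->cur_tx;
 *   if (queue full)                   ...reap up to cur_tx...
 *           stop queue;               rp->dirty_tx = dirty_tx;
 *           smp_rmb();      pairs     smp_wmb();
 *           if (!queue full)          if (!full && stopped)
 *                   wake queue;               wake queue;
 */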
@@ -1866,13 +1932,24 @@ static void rhine_tx(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent;
- int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
unsigned int pkts_compl = 0, bytes_compl = 0;
+ unsigned int dirty_tx = rp->dirty_tx;
+ unsigned int cur_tx;
struct sk_buff *skb;
+ /*
+ * The race with rhine_start_tx does not matter here as long as the
+ * driver enforces a value of cur_tx that was relevant when the
+ * packet was scheduled to the network chipset.
+ * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
+ */
+ smp_rmb();
+ cur_tx = rp->cur_tx;
/* find and cleanup dirty tx descriptors */
- while (rp->dirty_tx != rp->cur_tx) {
- txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+ while (dirty_tx != cur_tx) {
+ unsigned int entry = dirty_tx % TX_RING_SIZE;
+ u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+
netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
entry, txstatus);
if (txstatus & DescOwn)
@@ -1921,12 +1998,23 @@ static void rhine_tx(struct net_device *dev)
pkts_compl++;
dev_consume_skb_any(skb);
rp->tx_skbuff[entry] = NULL;
- entry = (++rp->dirty_tx) % TX_RING_SIZE;
+ dirty_tx++;
}
+ rp->dirty_tx = dirty_tx;
+ /* Pity we can't rely on the nearby BQL completion implicit barrier. */
+ smp_wmb();
+
netdev_completed_queue(dev, pkts_compl, bytes_compl);
- if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
+
+ /* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
+ if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
netif_wake_queue(dev);
+ smp_rmb();
+ /* Rejuvenate. */
+ if (rhine_tx_queue_full(rp))
+ netif_stop_queue(dev);
+ }
}
/**
@@ -1944,22 +2032,33 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
return be16_to_cpup((__be16 *)trailer);
}
+static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
+ int data_size)
+{
+ dma_rmb();
+ if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
+ u16 vlan_tci;
+
+ vlan_tci = rhine_get_vlan_tci(skb, data_size);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ }
+}
+
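dma_rmb() is the read-side counterpart of the dma_wmb() used when descriptors are published: once rx_status shows DescOwn cleared, it orders that check before the dependent reads of desc_length and the DMA'd payload. Sketch of the consume sequence:

/* Consume a descriptor returned by the NIC, sketched: */
if (le32_to_cpu(desc->rx_status) & DescOwn)
	return;		/* NIC still owns it, data not yet ready */
dma_rmb();		/* order the ownership check before data reads */
len = le32_to_cpu(desc->desc_length);	/* now safe to read */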
/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
struct rhine_private *rp = netdev_priv(dev);
struct device *hwdev = dev->dev.parent;
- int count;
int entry = rp->cur_rx % RX_RING_SIZE;
+ int count;
netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
- entry, le32_to_cpu(rp->rx_head_desc->rx_status));
+ entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
/* If EOP is set on the next entry, it's a new packet. Send it up. */
for (count = 0; count < limit; ++count) {
- struct rx_desc *desc = rp->rx_head_desc;
+ struct rx_desc *desc = rp->rx_ring + entry;
u32 desc_status = le32_to_cpu(desc->rx_status);
- u32 desc_length = le32_to_cpu(desc->desc_length);
int data_size = desc_status >> 16;
if (desc_status & DescOwn)
@@ -1975,10 +2074,6 @@ static int rhine_rx(struct net_device *dev, int limit)
"entry %#x length %d status %08x!\n",
entry, data_size,
desc_status);
- netdev_warn(dev,
- "Oversized Ethernet frame %p vs %p\n",
- rp->rx_head_desc,
- &rp->rx_ring[entry]);
dev->stats.rx_length_errors++;
} else if (desc_status & RxErr) {
/* There was a error. */
@@ -2000,16 +2095,17 @@ static int rhine_rx(struct net_device *dev, int limit)
}
}
} else {
- struct sk_buff *skb = NULL;
/* Length should omit the CRC */
int pkt_len = data_size - 4;
- u16 vlan_tci = 0;
+ struct sk_buff *skb;
/* Check if the packet is long enough to accept without
copying to a minimally-sized skbuff. */
- if (pkt_len < rx_copybreak)
+ if (pkt_len < rx_copybreak) {
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
- if (skb) {
+ if (unlikely(!skb))
+ goto drop;
+
dma_sync_single_for_cpu(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
@@ -2018,32 +2114,31 @@ static int rhine_rx(struct net_device *dev, int limit)
skb_copy_to_linear_data(skb,
rp->rx_skbuff[entry]->data,
pkt_len);
- skb_put(skb, pkt_len);
+
dma_sync_single_for_device(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
DMA_FROM_DEVICE);
} else {
+ struct rhine_skb_dma sd;
+
+ if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
+ goto drop;
+
skb = rp->rx_skbuff[entry];
- if (skb == NULL) {
- netdev_err(dev, "Inconsistent Rx descriptor chain\n");
- break;
- }
- rp->rx_skbuff[entry] = NULL;
- skb_put(skb, pkt_len);
+
dma_unmap_single(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
DMA_FROM_DEVICE);
+ rhine_skb_dma_nic_store(rp, &sd, entry);
}
- if (unlikely(desc_length & DescTag))
- vlan_tci = rhine_get_vlan_tci(skb, data_size);
-
+ skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
- if (unlikely(desc_length & DescTag))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ rhine_rx_vlan_tag(skb, desc, data_size);
+
netif_receive_skb(skb);
u64_stats_update_begin(&rp->rx_stats.syncp);
@@ -2051,35 +2146,16 @@ static int rhine_rx(struct net_device *dev, int limit)
rp->rx_stats.packets++;
u64_stats_update_end(&rp->rx_stats.syncp);
}
+give_descriptor_to_nic:
+ desc->rx_status = cpu_to_le32(DescOwn);
entry = (++rp->cur_rx) % RX_RING_SIZE;
- rp->rx_head_desc = &rp->rx_ring[entry];
- }
-
- /* Refill the Rx ring buffers. */
- for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
- struct sk_buff *skb;
- entry = rp->dirty_rx % RX_RING_SIZE;
- if (rp->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
- rp->rx_skbuff[entry] = skb;
- if (skb == NULL)
- break; /* Better luck next round. */
- rp->rx_skbuff_dma[entry] =
- dma_map_single(hwdev, skb->data,
- rp->rx_buf_sz,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(hwdev,
- rp->rx_skbuff_dma[entry])) {
- dev_kfree_skb(skb);
- rp->rx_skbuff_dma[entry] = 0;
- break;
- }
- rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
- }
- rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
}
return count;
+
+drop:
+ dev->stats.rx_dropped++;
+ goto give_descriptor_to_nic;
}
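Both allocation-failure paths now fall through to give_descriptor_to_nic, so a dropped frame never leaves a hole in the ring: the untouched buffer is simply re-armed, which is what lets the old deferred-refill loop (and dirty_rx) be deleted. Sketched:

/* Failure handling: recycle in place rather than refill later.
 *
 *   alloc fails -> rx_dropped++ -> desc->rx_status = DescOwn (same buffer)
 */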
static void rhine_restart_tx(struct net_device *dev) {
@@ -2484,9 +2560,8 @@ static int rhine_resume(struct device *device)
enable_mmio(rp->pioaddr, rp->quirks);
rhine_power_init(dev);
free_tbufs(dev);
- free_rbufs(dev);
alloc_tbufs(dev);
- alloc_rbufs(dev);
+ rhine_reset_rbufs(rp);
rhine_task_enable(rp);
spin_lock_bh(&rp->lock);
init_registers(dev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index af2694dc6f90..5a1068df7038 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -62,12 +62,12 @@
u32 temac_ior(struct temac_local *lp, int offset)
{
- return in_be32((u32 *)(lp->regs + offset));
+ return in_be32(lp->regs + offset);
}
void temac_iow(struct temac_local *lp, int offset, u32 value)
{
- out_be32((u32 *) (lp->regs + offset), value);
+ out_be32(lp->regs + offset, value);
}
int temac_indirect_busywait(struct temac_local *lp)
@@ -124,7 +124,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
*/
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
- return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
+ return in_be32(lp->sdma_regs + (reg << 2));
}
/**
@@ -134,7 +134,7 @@ static u32 temac_dma_in32(struct temac_local *lp, int reg)
*/
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
- out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+ out_be32(lp->sdma_regs + (reg << 2), value);
}
/* DMA register access functions can be DCR based or memory mapped.
@@ -400,7 +400,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
mutex_unlock(&lp->indirect_mutex);
}
-struct temac_option {
+static struct temac_option {
int flg;
u32 opt;
u32 reg;
@@ -587,7 +587,7 @@ static void temac_device_reset(struct net_device *ndev)
ndev->trans_start = jiffies; /* prevent tx timeout */
}
-void temac_adjust_link(struct net_device *ndev)
+static void temac_adjust_link(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct phy_device *phy = lp->phy_dev;
@@ -688,10 +688,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag)) {
- if (!netif_queue_stopped(ndev)) {
+ if (!netif_queue_stopped(ndev))
netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
- }
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 4c9b4fa1d3c1..7cb9abac95c8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -11,16 +11,16 @@
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/if_vlan.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
-#define XAE_HDR_VLAN_SIZE 18 /* Size of an Ethernet hdr + VLAN */
#define XAE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */
#define XAE_MTU 1500 /* Max MTU of an Ethernet frame */
#define XAE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */
#define XAE_MAX_FRAME_SIZE (XAE_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
-#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE)
+#define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + VLAN_ETH_HLEN + XAE_TRL_SIZE)
#define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
/* Configuration options */
@@ -38,18 +38,21 @@
#define XAE_OPTION_FLOW_CONTROL (1 << 4)
/* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
- * stripped. Default: disabled (set) */
+ * stripped. Default: disabled (set)
+ */
#define XAE_OPTION_FCS_STRIP (1 << 5)
/* Generate FCS field and add PAD automatically for outgoing frames.
- * Default: enabled (set) */
+ * Default: enabled (set)
+ */
#define XAE_OPTION_FCS_INSERT (1 << 6)
/* Enable Length/Type error checking for incoming frames. When this option is
* set, the MAC will filter frames that have a mismatched type/length field
* and if XAE_OPTION_REPORT_RXERR is set, the user is notified when these
* types of frames are encountered. When this option is cleared, the MAC will
- * allow these types of frames to be received. Default: enabled (set) */
+ * allow these types of frames to be received. Default: enabled (set)
+ */
#define XAE_OPTION_LENTYPE_ERR (1 << 7)
/* Enable the transmitter. Default: enabled (set) */
@@ -159,12 +162,12 @@
#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
#define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */
-#define XAE_MDIO_MIP_OFFSET 0x00000620 /* MII Mgmt Interrupt Pending
- * register offset */
-#define XAE_MDIO_MIE_OFFSET 0x00000640 /* MII Management Interrupt Enable
- * register offset */
-#define XAE_MDIO_MIC_OFFSET 0x00000660 /* MII Management Interrupt Clear
- * register offset. */
+/* MII Mgmt Interrupt Pending register offset */
+#define XAE_MDIO_MIP_OFFSET 0x00000620
+/* MII Management Interrupt Enable register offset */
+#define XAE_MDIO_MIE_OFFSET 0x00000640
+/* MII Management Interrupt Clear register offset. */
+#define XAE_MDIO_MIC_OFFSET 0x00000660
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
@@ -176,18 +179,17 @@
#define XAE_MCAST_TABLE_OFFSET 0x00020000 /* Multicast table address */
/* Bit Masks for Axi Ethernet RAF register */
-#define XAE_RAF_MCSTREJ_MASK 0x00000002 /* Reject receive multicast
- * destination address */
-#define XAE_RAF_BCSTREJ_MASK 0x00000004 /* Reject receive broadcast
- * destination address */
+/* Reject receive multicast destination address */
+#define XAE_RAF_MCSTREJ_MASK 0x00000002
+/* Reject receive broadcast destination address */
+#define XAE_RAF_BCSTREJ_MASK 0x00000004
#define XAE_RAF_TXVTAGMODE_MASK 0x00000018 /* Tx VLAN TAG mode */
#define XAE_RAF_RXVTAGMODE_MASK 0x00000060 /* Rx VLAN TAG mode */
#define XAE_RAF_TXVSTRPMODE_MASK 0x00000180 /* Tx VLAN STRIP mode */
#define XAE_RAF_RXVSTRPMODE_MASK 0x00000600 /* Rx VLAN STRIP mode */
#define XAE_RAF_NEWFNCENBL_MASK 0x00000800 /* New function mode */
-#define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000 /* Exteneded Multicast
- * Filtering mode
- */
+/* Extended Multicast Filtering mode */
+#define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000
#define XAE_RAF_STATSRST_MASK 0x00002000 /* Stats. Counter Reset */
#define XAE_RAF_RXBADFRMEN_MASK 0x00004000 /* Recv Bad Frame Enable */
#define XAE_RAF_TXVTAGMODE_SHIFT 3 /* Tx Tag mode shift bits */
@@ -197,15 +199,16 @@
/* Bit Masks for Axi Ethernet TPF and IFGP registers */
#define XAE_TPF_TPFV_MASK 0x0000FFFF /* Tx pause frame value */
-#define XAE_IFGP0_IFGP_MASK 0x0000007F /* Transmit inter-frame
- * gap adjustment value */
+/* Transmit inter-frame gap adjustment value */
+#define XAE_IFGP0_IFGP_MASK 0x0000007F
/* Bit Masks for Axi Ethernet IS, IE and IP registers, Same masks apply
- * for all 3 registers. */
-#define XAE_INT_HARDACSCMPLT_MASK 0x00000001 /* Hard register access
- * complete */
-#define XAE_INT_AUTONEG_MASK 0x00000002 /* Auto negotiation
- * complete */
+ * for all 3 registers.
+ */
+/* Hard register access complete */
+#define XAE_INT_HARDACSCMPLT_MASK 0x00000001
+/* Auto negotiation complete */
+#define XAE_INT_AUTONEG_MASK 0x00000002
#define XAE_INT_RXCMPIT_MASK 0x00000004 /* Rx complete */
#define XAE_INT_RXRJECT_MASK 0x00000008 /* Rx frame rejected */
#define XAE_INT_RXFIFOOVR_MASK 0x00000010 /* Rx fifo overrun */
@@ -215,10 +218,9 @@
#define XAE_INT_PHYRSTCMPLT_MASK 0x00000100 /* Phy Reset complete */
#define XAE_INT_ALL_MASK 0x0000003F /* All the ints */
+/* INT bits that indicate receive errors */
#define XAE_INT_RECV_ERROR_MASK \
- (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK) /* INT bits that
- * indicate receive
- * errors */
+ (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK)
/* Bit masks for Axi Ethernet VLAN TPID Word 0 register */
#define XAE_TPID_0_MASK 0x0000FFFF /* TPID 0 */
@@ -231,27 +233,28 @@
/* Bit masks for Axi Ethernet RCW1 register */
#define XAE_RCW1_RST_MASK 0x80000000 /* Reset */
#define XAE_RCW1_JUM_MASK 0x40000000 /* Jumbo frame enable */
-#define XAE_RCW1_FCS_MASK 0x20000000 /* In-Band FCS enable
- * (FCS not stripped) */
+/* In-Band FCS enable (FCS not stripped) */
+#define XAE_RCW1_FCS_MASK 0x20000000
#define XAE_RCW1_RX_MASK 0x10000000 /* Receiver enable */
#define XAE_RCW1_VLAN_MASK 0x08000000 /* VLAN frame enable */
-#define XAE_RCW1_LT_DIS_MASK 0x02000000 /* Length/type field valid check
- * disable */
-#define XAE_RCW1_CL_DIS_MASK 0x01000000 /* Control frame Length check
- * disable */
-#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF /* Pause frame source address
- * bits [47:32]. Bits [31:0] are
- * stored in register RCW0 */
+/* Length/type field valid check disable */
+#define XAE_RCW1_LT_DIS_MASK 0x02000000
+/* Control frame Length check disable */
+#define XAE_RCW1_CL_DIS_MASK 0x01000000
+/* Pause frame source address bits [47:32]. Bits [31:0] are
+ * stored in register RCW0
+ */
+#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF
/* Bit masks for Axi Ethernet TC register */
#define XAE_TC_RST_MASK 0x80000000 /* Reset */
#define XAE_TC_JUM_MASK 0x40000000 /* Jumbo frame enable */
-#define XAE_TC_FCS_MASK 0x20000000 /* In-Band FCS enable
- * (FCS not generated) */
+/* In-Band FCS enable (FCS not generated) */
+#define XAE_TC_FCS_MASK 0x20000000
#define XAE_TC_TX_MASK 0x10000000 /* Transmitter enable */
#define XAE_TC_VLAN_MASK 0x08000000 /* VLAN frame enable */
-#define XAE_TC_IFG_MASK 0x02000000 /* Inter-frame gap adjustment
- * enable */
+/* Inter-frame gap adjustment enable */
+#define XAE_TC_IFG_MASK 0x02000000
/* Bit masks for Axi Ethernet FCC register */
#define XAE_FCC_FCRX_MASK 0x20000000 /* Rx flow control enable */
@@ -301,10 +304,10 @@
#define XAE_MDIO_INT_MIIM_RDY_MASK 0x00000001 /* MIIM Interrupt */
/* Bit masks for Axi Ethernet UAW1 register */
-#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF /* Station address bits
- * [47:32]; Station address
- * bits [31:0] are stored in
- * register UAW0 */
+/* Station address bits [47:32]; Station address
+ * bits [31:0] are stored in register UAW0
+ */
+#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF
/* Bit masks for Axi Ethernet FMI register */
#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
@@ -320,8 +323,8 @@
#define XAE_PHY_TYPE_SGMII 4
#define XAE_PHY_TYPE_1000BASE_X 5
-#define XAE_MULTICAST_CAM_TABLE_NUM 4 /* Total number of entries in the
- * hardware multicast table. */
+/* Total number of entries in the hardware multicast table. */
+#define XAE_MULTICAST_CAM_TABLE_NUM 4
/* Axi Ethernet Synthesis features */
#define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0)
@@ -407,8 +410,11 @@ struct axidma_bd {
* Txed/Rxed in the existing hardware. If jumbo option is
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
- * @jumbo_support: Stores hardware configuration for jumbo support. If hardware
- * can handle jumbo packets, this entry will be 1, else 0.
+ * @rxmem: Stores rx memory size for jumbo frame handling.
+ * @csum_offload_on_tx_path: Stores the checksum selection on TX side.
+ * @csum_offload_on_rx_path: Stores the checksum selection on RX side.
+ * @coalesce_count_rx: Stores the irq coalesce count on the RX side.
+ * @coalesce_count_tx: Stores the irq coalesce count on the TX side.
*/
struct axienet_local {
struct net_device *ndev;
@@ -446,7 +452,7 @@ struct axienet_local {
u32 rx_bd_ci;
u32 max_frm_size;
- u32 jumbo_support;
+ u32 rxmem;
int csum_offload_on_tx_path;
int csum_offload_on_rx_path;
@@ -472,7 +478,7 @@ struct axienet_option {
* @lp: Pointer to axienet local structure
* @offset: Address offset from the base address of Axi Ethernet core
*
- * returns: The contents of the Axi Ethernet register
+ * Return: The contents of the Axi Ethernet register
*
* This function returns the contents of the corresponding register.
*/
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 28b7e7d9c272..4208dd7ef101 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -117,7 +117,7 @@ static struct axienet_option axienet_options[] = {
* @lp: Pointer to axienet local structure
* @reg: Address offset from the base address of the Axi DMA core
*
- * returns: The contents of the Axi DMA register
+ * Return: The contents of the Axi DMA register
*
* This function returns the contents of the corresponding Axi DMA register.
*/
@@ -179,8 +179,7 @@ static void axienet_dma_bd_release(struct net_device *ndev)
* axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
* @ndev: Pointer to the net_device structure
*
- * returns: 0, on success
- * -ENOMEM, on failure
+ * Return: 0 on success; -ENOMEM on failure
*
* This function is called to initialize the Rx and Tx DMA descriptor
* rings. This initializes the descriptors with required default values
@@ -198,9 +197,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- /*
- * Allocate the Tx and Rx buffer descriptors.
- */
+ /* Allocate the Tx and Rx buffer descriptors. */
lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, GFP_KERNEL);
@@ -263,7 +260,8 @@ static int axienet_dma_bd_init(struct net_device *ndev)
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.*/
+ * halted state. This will make the Rx side ready for reception.
+ */
axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -273,7 +271,8 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting */
+ * tail pointer register that the Tx channel will start transmitting.
+ */
axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -320,7 +319,7 @@ static void axienet_set_mac_address(struct net_device *ndev, void *address)
* @ndev: Pointer to the net_device structure
* @p: 6 byte Address to be written as MAC address
*
- * returns: 0 for all conditions. Presently, there is no failure case.
+ * Return: 0 for all conditions. Presently, there is no failure case.
*
* This function is called to initialize the MAC address of the Axi Ethernet
* core. It calls the core specific axienet_set_mac_address. This is the
@@ -354,7 +353,8 @@ static void axienet_set_multicast_list(struct net_device *ndev)
netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
/* We must make the kernel realize we had to move into
* promiscuous mode. If it was a promiscuous mode request
- * the flag is already set. If not we set it. */
+ * the flag is already set. If not we set it.
+ */
ndev->flags |= IFF_PROMISC;
reg = axienet_ior(lp, XAE_FMI_OFFSET);
reg |= XAE_FMI_PM_MASK;
@@ -438,14 +438,15 @@ static void __axienet_device_reset(struct axienet_local *lp,
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
- * reset process. */
+ * reset process.
+ */
axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
timeout = DELAY_OF_ONE_MILLISEC;
while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
udelay(1);
if (--timeout == 0) {
- dev_err(dev, "axienet_device_reset DMA "
- "reset timeout!\n");
+ netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
+ __func__);
break;
}
}
@@ -471,19 +472,21 @@ static void axienet_device_reset(struct net_device *ndev)
__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
+ lp->options |= XAE_OPTION_VLAN;
lp->options &= (~XAE_OPTION_JUMBO);
if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU) &&
- (lp->jumbo_support)) {
- lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
- XAE_TRL_SIZE;
- lp->options |= XAE_OPTION_JUMBO;
+ (ndev->mtu <= XAE_JUMBO_MTU)) {
+ lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
+ XAE_TRL_SIZE;
+
+ if (lp->max_frm_size <= lp->rxmem)
+ lp->options |= XAE_OPTION_JUMBO;
}
if (axienet_dma_bd_init(ndev)) {
- dev_err(&ndev->dev, "axienet_device_reset descriptor "
- "allocation failed\n");
+ netdev_err(ndev, "%s: descriptor allocation failed\n",
+ __func__);
}
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -497,7 +500,8 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
- * transmitter disabled.*/
+ * transmitter disabled.
+ */
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
@@ -558,8 +562,8 @@ static void axienet_adjust_link(struct net_device *ndev)
lp->last_link = link_state;
phy_print_status(phy);
} else {
- dev_err(&ndev->dev, "Error setting Axi Ethernet "
- "mac speed\n");
+ netdev_err(ndev,
+ "Error setting Axi Ethernet mac speed\n");
}
}
}
@@ -617,7 +621,7 @@ static void axienet_start_xmit_done(struct net_device *ndev)
* @lp: Pointer to the axienet_local structure
* @num_frag: The number of BDs to check for
*
- * returns: 0, on success
+ * Return: 0, on success
* NETDEV_TX_BUSY, if any of the descriptors are not free
*
* This function is invoked before BDs are allocated and transmission starts.
@@ -640,7 +644,7 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
* @skb: sk_buff pointer that contains data to be Txed.
* @ndev: Pointer to net_device structure.
*
- * returns: NETDEV_TX_OK, on success
+ * Return: NETDEV_TX_OK, on success
* NETDEV_TX_BUSY, if any of the descriptors are not free
*
* This function is invoked from upper layers to initiate transmission. The
@@ -726,15 +730,15 @@ static void axienet_recv(struct net_device *ndev)
u32 csumstatus;
u32 size = 0;
u32 packets = 0;
- dma_addr_t tail_p;
+ dma_addr_t tail_p = 0;
struct axienet_local *lp = netdev_priv(ndev);
struct sk_buff *skb, *new_skb;
struct axidma_bd *cur_p;
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
skb = (struct sk_buff *) (cur_p->sw_id_offset);
length = cur_p->app4 & 0x0000FFFF;
@@ -786,7 +790,8 @@ static void axienet_recv(struct net_device *ndev)
ndev->stats.rx_packets += packets;
ndev->stats.rx_bytes += size;
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ if (tail_p)
+ axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
/**
@@ -794,7 +799,7 @@ static void axienet_recv(struct net_device *ndev)
* @irq: irq number
* @_ndev: net_device pointer
*
- * returns: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED for all cases.
*
* This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
* to complete the BD processing.
@@ -808,6 +813,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
axienet_start_xmit_done(lp->ndev);
goto out;
}
@@ -831,9 +837,9 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
tasklet_schedule(&lp->dma_err_tasklet);
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
}
out:
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
return IRQ_HANDLED;
}
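Moving the status write ahead of the completion processing (here and, symmetrically, in the Rx handler below) means an interrupt that fires while completions are being reaped is not silently cleared by a write-back at the end of the handler; presumably the race being closed is a completion landing between processing and the old unconditional ack. Sketched:

/* Ack-then-process, sketched:
 *
 *   status = read(SR);
 *   write(SR, status);      <- ack only what we have seen
 *   reap_completions();     <- a new event during this re-asserts SR
 */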
@@ -842,7 +848,7 @@ out:
* @irq: irq number
* @_ndev: net_device pointer
*
- * returns: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED for all cases.
*
* This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
* processing.
@@ -856,6 +862,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
axienet_recv(lp->ndev);
goto out;
}
@@ -879,9 +886,9 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
tasklet_schedule(&lp->dma_err_tasklet);
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
}
out:
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
return IRQ_HANDLED;
}
@@ -891,7 +898,7 @@ static void axienet_dma_err_handler(unsigned long data);
* axienet_open - Driver open routine.
* @ndev: Pointer to net_device structure
*
- * returns: 0, on success.
+ * Return: 0, on success.
* -ENODEV, if PHY cannot be connected to
* non-zero error value on failure
*
@@ -914,7 +921,8 @@ static int axienet_open(struct net_device *ndev)
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
* including the MDIO. If MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards. */
+ * process is started, MDIO will be broken afterwards.
+ */
axienet_iow(lp, XAE_MDIO_MC_OFFSET,
(mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
axienet_device_reset(ndev);
@@ -925,14 +933,20 @@ static int axienet_open(struct net_device *ndev)
return ret;
if (lp->phy_node) {
- lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ if (lp->phy_type == XAE_PHY_TYPE_GMII) {
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
axienet_adjust_link, 0,
PHY_INTERFACE_MODE_GMII);
- if (!lp->phy_dev) {
- dev_err(lp->dev, "of_phy_connect() failed\n");
- return -ENODEV;
+ } else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) {
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII_ID);
}
- phy_start(lp->phy_dev);
+
+ if (!lp->phy_dev)
+ dev_err(lp->dev, "of_phy_connect() failed\n");
+ else
+ phy_start(lp->phy_dev);
}
/* Enable tasklets for Axi DMA error handling */
@@ -965,7 +979,7 @@ err_tx_irq:
* axienet_stop - Driver stop routine.
* @ndev: Pointer to net_device structure
*
- * returns: 0, on success.
+ * Return: 0, on success.
*
* This is the driver stop routine. It calls phy_disconnect to stop the PHY
* device. It also removes the interrupt handlers and disables the interrupts.
@@ -1005,7 +1019,7 @@ static int axienet_stop(struct net_device *ndev)
* @ndev: Pointer to net_device structure
* @new_mtu: New mtu value to be applied
*
- * returns: Always returns 0 (success).
+ * Return: Always returns 0 (success).
*
* This is the change mtu driver routine. It checks if the Axi Ethernet
* hardware supports jumbo frames before changing the mtu. This can be
@@ -1017,15 +1031,15 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
if (netif_running(ndev))
return -EBUSY;
- if (lp->jumbo_support) {
- if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
- return -EINVAL;
- ndev->mtu = new_mtu;
- } else {
- if ((new_mtu > XAE_MTU) || (new_mtu < 64))
- return -EINVAL;
- ndev->mtu = new_mtu;
- }
+
+ if ((new_mtu + VLAN_ETH_HLEN +
+ XAE_TRL_SIZE) > lp->rxmem)
+ return -EINVAL;
+
+ if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
+ return -EINVAL;
+
+ ndev->mtu = new_mtu;
return 0;
}
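A worked example of the new check, using the constants from xilinx_axienet.h (VLAN_ETH_HLEN is 18 bytes, i.e. the 14-byte Ethernet header plus a 4-byte VLAN tag; XAE_TRL_SIZE is 4):

/* new_mtu = 9000 (XAE_JUMBO_MTU):
 *   frame = 9000 + VLAN_ETH_HLEN (18) + XAE_TRL_SIZE (4) = 9022 bytes
 * so the request is honoured only when the synthesized Rx memory
 * (lp->rxmem, from the "xlnx,rxmem" DT property) is at least 9022.
 */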
@@ -1072,6 +1086,8 @@ static const struct net_device_ops axienet_netdev_ops = {
* not be found, the function returns -ENODEV. This function calls the
* relevant PHY ethtool API to get the PHY settings.
* Issue "ethtool ethX" under linux prompt to execute this function.
+ *
+ * Return: 0 on success, -ENODEV if PHY doesn't exist
*/
static int axienet_ethtools_get_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
@@ -1093,6 +1109,8 @@ static int axienet_ethtools_get_settings(struct net_device *ndev,
* relevant PHY ethtool API to set the PHY.
* Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
* function.
+ *
+ * Return: 0 on success, -ENODEV if PHY doesn't exist
*/
static int axienet_ethtools_set_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
@@ -1127,6 +1145,8 @@ static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
*
* This implements ethtool command for getting the total register length
* information.
+ *
+ * Return: the total regs length
*/
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
@@ -1213,11 +1233,13 @@ axienet_ethtools_get_pauseparam(struct net_device *ndev,
* axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
* settings.
* @ndev: Pointer to net_device structure
- * @epauseparam:Pointer to ethtool_pauseparam structure
+ * @epauseparm: Pointer to ethtool_pauseparam structure
*
* This implements ethtool command for enabling flow control on Rx and Tx
* paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
* function.
+ *
+ * Return: 0 on success, -EFAULT if device is running
*/
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
@@ -1227,8 +1249,8 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
- printk(KERN_ERR "%s: Please stop netif before applying "
- "configruation\n", ndev->name);
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
return -EFAULT;
}
@@ -1254,6 +1276,8 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
* This implements ethtool command for getting the DMA interrupt coalescing
* count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
* execute this function.
+ *
+ * Return: 0 always
*/
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
struct ethtool_coalesce *ecoalesce)
@@ -1277,6 +1301,8 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev,
* This implements ethtool command for setting the DMA interrupt coalescing
* count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
* prompt to execute this function.
+ *
+ * Return: 0, on success, Non-zero error value on failure.
*/
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
struct ethtool_coalesce *ecoalesce)
@@ -1284,8 +1310,8 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
- printk(KERN_ERR "%s: Please stop netif before applying "
- "configruation\n", ndev->name);
+ netdev_err(ndev,
+ "Please stop netif before applying configuration\n");
return -EFAULT;
}
@@ -1354,7 +1380,8 @@ static void axienet_dma_err_handler(unsigned long data)
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
* including the MDIO. So if MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards. */
+ * process is started, MDIO will be broken afterwards.
+ */
axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
~XAE_MDIO_MC_MDIOEN_MASK));
@@ -1425,7 +1452,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.*/
+ * halted state. This will make the Rx side ready for reception.
+ */
axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -1435,7 +1463,8 @@ static void axienet_dma_err_handler(unsigned long data)
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting */
+ * tail pointer register that the Tx channel will start transmitting
+ */
axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -1451,7 +1480,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
- * transmitter disabled.*/
+ * transmitter disabled.
+ */
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
@@ -1460,11 +1490,10 @@ static void axienet_dma_err_handler(unsigned long data)
}
/**
- * axienet_of_probe - Axi Ethernet probe function.
- * @op: Pointer to platform device structure.
- * @match: Pointer to device id structure
+ * axienet_probe - Axi Ethernet probe function.
+ * @pdev: Pointer to platform device structure.
*
- * returns: 0, on success
+ * Return: 0, on success
* Non-zero error value on failure.
*
* This is the probe routine for Axi Ethernet driver. This is called before
@@ -1472,22 +1501,23 @@ static void axienet_dma_err_handler(unsigned long data)
* device. Parses through device tree and populates fields of
* axienet_local. It registers the Ethernet device.
*/
-static int axienet_of_probe(struct platform_device *op)
+static int axienet_probe(struct platform_device *pdev)
{
- __be32 *p;
- int size, ret = 0;
+ int ret;
struct device_node *np;
struct axienet_local *lp;
struct net_device *ndev;
- const void *addr;
+ u8 mac_addr[6];
+ struct resource *ethres, dmares;
+ u32 value;
ndev = alloc_etherdev(sizeof(*lp));
if (!ndev)
return -ENOMEM;
- platform_set_drvdata(op, ndev);
+ platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &op->dev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
ndev->netdev_ops = &axienet_netdev_ops;
@@ -1495,21 +1525,23 @@ static int axienet_of_probe(struct platform_device *op)
lp = netdev_priv(ndev);
lp->ndev = ndev;
- lp->dev = &op->dev;
+ lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
/* Map device registers */
- lp->regs = of_iomap(op->dev.of_node, 0);
+ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
if (!lp->regs) {
- dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+ dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
ret = -ENOMEM;
- goto nodev;
+ goto free_netdev;
}
+
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
- if (p) {
- switch (be32_to_cpup(p)) {
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
+ if (!ret) {
+ switch (value) {
case 1:
lp->csum_offload_on_tx_path =
XAE_FEATURE_PARTIAL_TX_CSUM;
@@ -1528,9 +1560,9 @@ static int axienet_of_probe(struct platform_device *op)
lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
}
}
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
- if (p) {
- switch (be32_to_cpup(p)) {
+ ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
+ if (!ret) {
+ switch (value) {
case 1:
lp->csum_offload_on_rx_path =
XAE_FEATURE_PARTIAL_RX_CSUM;
@@ -1546,82 +1578,77 @@ static int axienet_of_probe(struct platform_device *op)
}
}
/* For supporting jumbo frames, the Axi Ethernet hardware must have
- * a larger Rx/Tx Memory. Typically, the size must be more than or
- * equal to 16384 bytes, so that we can enable jumbo option and start
- * supporting jumbo frames. Here we check for memory allocated for
- * Rx/Tx in the hardware from the device-tree and accordingly set
- * flags. */
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
- if (p) {
- if ((be32_to_cpup(p)) >= 0x4000)
- lp->jumbo_support = 1;
- }
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
- if (p)
- lp->phy_type = be32_to_cpup(p);
+ * a larger Rx/Tx Memory. Typically, the size must be large so that
+ * we can enable jumbo option and start supporting jumbo frames.
+ * Here we check for memory allocated for Rx/Tx in the hardware from
+ * the device-tree and accordingly set flags.
+ */
+ of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
+ of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type);
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
- if (!np) {
- dev_err(&op->dev, "could not find DMA node\n");
- ret = -ENODEV;
- goto err_iounmap;
+ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
+ if (IS_ERR(np)) {
+ dev_err(&pdev->dev, "could not find DMA node\n");
+ ret = PTR_ERR(np);
+ goto free_netdev;
}
- lp->dma_regs = of_iomap(np, 0);
- if (lp->dma_regs) {
- dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
- } else {
- dev_err(&op->dev, "unable to map DMA registers\n");
- of_node_put(np);
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get DMA resource\n");
+ goto free_netdev;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
+ if (!lp->dma_regs) {
+ dev_err(&pdev->dev, "could not map DMA regs\n");
+ ret = -ENOMEM;
+ goto free_netdev;
}
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
- dev_err(&op->dev, "could not determine irqs\n");
+ dev_err(&pdev->dev, "could not determine irqs\n");
ret = -ENOMEM;
- goto err_iounmap_2;
+ goto free_netdev;
}
/* Retrieve the MAC address */
- addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
- if ((!addr) || (size != 6)) {
- dev_err(&op->dev, "could not find MAC address\n");
- ret = -ENODEV;
- goto err_iounmap_2;
+ ret = of_property_read_u8_array(pdev->dev.of_node,
+ "local-mac-address", mac_addr, 6);
+ if (ret) {
+ dev_err(&pdev->dev, "could not find MAC address\n");
+ goto free_netdev;
}
- axienet_set_mac_address(ndev, (void *) addr);
+ axienet_set_mac_address(ndev, (void *)mac_addr);
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
- ret = axienet_mdio_setup(lp, op->dev.of_node);
- if (ret)
- dev_warn(&op->dev, "error registering MDIO bus\n");
+ lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (lp->phy_node) {
+ ret = axienet_mdio_setup(lp, pdev->dev.of_node);
+ if (ret)
+ dev_warn(&pdev->dev, "error registering MDIO bus\n");
+ }
ret = register_netdev(lp->ndev);
if (ret) {
dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
- goto err_iounmap_2;
+ goto free_netdev;
}
return 0;
-err_iounmap_2:
- if (lp->dma_regs)
- iounmap(lp->dma_regs);
-err_iounmap:
- iounmap(lp->regs);
-nodev:
+free_netdev:
free_netdev(ndev);
- ndev = NULL;
+
return ret;
}
-static int axienet_of_remove(struct platform_device *op)
+static int axienet_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(op);
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
axienet_mdio_teardown(lp);
@@ -1630,24 +1657,21 @@ static int axienet_of_remove(struct platform_device *op)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
- iounmap(lp->regs);
- if (lp->dma_regs)
- iounmap(lp->dma_regs);
free_netdev(ndev);
return 0;
}
-static struct platform_driver axienet_of_driver = {
- .probe = axienet_of_probe,
- .remove = axienet_of_remove,
+static struct platform_driver axienet_driver = {
+ .probe = axienet_probe,
+ .remove = axienet_remove,
.driver = {
.name = "xilinx_axienet",
.of_match_table = axienet_of_match,
},
};
-module_platform_driver(axienet_of_driver);
+module_platform_driver(axienet_driver);
MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 3b67d60d4378..2a5a16834c01 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -37,7 +37,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
* @phy_id: Address of the PHY device
* @reg: PHY register to read
*
- * returns: The register contents on success, -ETIMEDOUT on a timeout
+ * Return: The register contents on success, -ETIMEDOUT on a timeout
*
* Reads the contents of the requested register from the requested PHY
* address by first writing the details into MCR register. After a while
@@ -80,7 +80,7 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* @reg: PHY register to write to
* @val: Value to be written into the register
*
- * returns: 0 on success, -ETIMEDOUT on a timeout
+ * Return: 0 on success, -ETIMEDOUT on a timeout
*
* Writes the value to the requested register by first writing the value
* into MWD register. The MCR register is then appropriately set up
@@ -119,7 +119,7 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
* @lp: Pointer to axienet local data structure.
* @np: Pointer to device node
*
- * returns: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
+ * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
* mdiobus_alloc (to allocate memory for mii bus structure) fails.
*
* Sets up the MDIO interface by initializing the MDIO clock and enabling the
@@ -161,19 +161,19 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
np1 = of_find_node_by_name(NULL, "cpu");
if (!np1) {
- printk(KERN_WARNING "%s(): Could not find CPU device node.",
- __func__);
- printk(KERN_WARNING "Setting MDIO clock divisor to "
- "default %d\n", DEFAULT_CLOCK_DIVISOR);
+ netdev_warn(lp->ndev, "Could not find CPU device node.\n");
+ netdev_warn(lp->ndev,
+ "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
clk_div = DEFAULT_CLOCK_DIVISOR;
goto issue;
}
property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
if (!property_p) {
- printk(KERN_WARNING "%s(): Could not find CPU property: "
- "clock-frequency.", __func__);
- printk(KERN_WARNING "Setting MDIO clock divisor to "
- "default %d\n", DEFAULT_CLOCK_DIVISOR);
+ netdev_warn(lp->ndev, "clock-frequency property not found.\n");
+ netdev_warn(lp->ndev,
+ "Setting MDIO clock divisor to default %d\n",
+ DEFAULT_CLOCK_DIVISOR);
clk_div = DEFAULT_CLOCK_DIVISOR;
of_node_put(np1);
goto issue;
@@ -183,12 +183,14 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
/* If there is any remainder from the division of
* fHOST / (MAX_MDIO_FREQ * 2), then we need to add
- * 1 to the clock divisor or we will surely be above 2.5 MHz */
+ * 1 to the clock divisor or we will surely be above 2.5 MHz
+ */
if (host_clock % (MAX_MDIO_FREQ * 2))
clk_div++;
- printk(KERN_DEBUG "%s(): Setting MDIO clock divisor to %u based "
- "on %u Hz host clock.\n", __func__, clk_div, host_clock);
+ netdev_dbg(lp->ndev,
+ "Setting MDIO clock divisor to %u/%u Hz host clock.\n",
+ clk_div, host_clock);
of_node_put(np1);
issue:
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index cc27dea3414e..9956680402de 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc)
smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
- DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ;
+ DB_SMT("SRF: sending SRF at %p, len %d\n",smt,mb->sm_len) ;
DB_SMT("SRF: state SR%d Threshold %d\n",
smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
#ifdef DEBUG
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
new file mode 100644
index 000000000000..78d49d186e05
--- /dev/null
+++ b/drivers/net/geneve.c
@@ -0,0 +1,523 @@
+/*
+ * GENEVE: Generic Network Virtualization Encapsulation
+ *
+ * Copyright (c) 2015 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/hash.h>
+#include <net/rtnetlink.h>
+#include <net/geneve.h>
+
+#define GENEVE_NETDEV_VER "0.6"
+
+#define GENEVE_UDP_PORT 6081
+
+#define GENEVE_N_VID (1u << 24)
+#define GENEVE_VID_MASK (GENEVE_N_VID - 1)
+
+#define VNI_HASH_BITS 10
+#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+/* per-network namespace private data for this module */
+struct geneve_net {
+ struct list_head geneve_list;
+ struct hlist_head vni_list[VNI_HASH_SIZE];
+};
+
+/* Pseudo network device */
+struct geneve_dev {
+ struct hlist_node hlist; /* vni hash table */
+ struct net *net; /* netns for packet i/o */
+ struct net_device *dev; /* netdev for geneve tunnel */
+ struct geneve_sock *sock; /* socket used for geneve tunnel */
+ u8 vni[3]; /* virtual network ID for tunnel */
+ u8 ttl; /* TTL override */
+ u8 tos; /* TOS override */
+ struct sockaddr_in remote; /* IPv4 address for link partner */
+ struct list_head next; /* geneve's per namespace list */
+};
+
+static int geneve_net_id;
+
+static inline __u32 geneve_net_vni_hash(u8 vni[3])
+{
+ __u32 vnid;
+
+ vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
+ return hash_32(vnid, VNI_HASH_BITS);
+}
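The three VNI bytes are packed big-endian into a 24-bit value and folded into one of VNI_HASH_SIZE (1024) buckets. For example:

u8 vni[3] = { 0x0a, 0x0b, 0x0c };	/* VNI 0x0a0b0c = 658188 */
u32 bucket = geneve_net_vni_hash(vni);	/* hash_32(0x0a0b0c, 10), < 1024 */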
+
+/* geneve receive/decap routine */
+static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
+{
+ struct genevehdr *gnvh = geneve_hdr(skb);
+ struct geneve_dev *dummy, *geneve = NULL;
+ struct geneve_net *gn;
+ struct iphdr *iph = NULL;
+ struct pcpu_sw_netstats *stats;
+ struct hlist_head *vni_list_head;
+ int err = 0;
+ __u32 hash;
+
+ iph = ip_hdr(skb); /* Still outer IP header... */
+
+ gn = gs->rcv_data;
+
+ /* Find the device for this VNI */
+ hash = geneve_net_vni_hash(gnvh->vni);
+ vni_list_head = &gn->vni_list[hash];
+ hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
+ if (!memcmp(gnvh->vni, dummy->vni, sizeof(dummy->vni)) &&
+ iph->saddr == dummy->remote.sin_addr.s_addr) {
+ geneve = dummy;
+ break;
+ }
+ }
+ if (!geneve)
+ goto drop;
+
+ /* Drop packets w/ critical options,
+ * since we don't support any...
+ */
+ if (gnvh->critical)
+ goto drop;
+
+ skb_reset_mac_header(skb);
+ skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
+ skb->protocol = eth_type_trans(skb, geneve->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+ /* Ignore packet loops (and multicast echo) */
+ if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+ goto drop;
+
+ skb_reset_network_header(skb);
+
+ iph = ip_hdr(skb); /* Now inner IP header... */
+ err = IP_ECN_decapsulate(iph, skb);
+
+ if (unlikely(err)) {
+ if (log_ecn_error)
+ net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+ &iph->saddr, iph->tos);
+ if (err > 1) {
+ ++geneve->dev->stats.rx_frame_errors;
+ ++geneve->dev->stats.rx_errors;
+ goto drop;
+ }
+ }
+
+ stats = this_cpu_ptr(geneve->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ netif_rx(skb);
+
+ return;
+drop:
+ /* Consume bad packet */
+ kfree_skb(skb);
+}
+
+/* Setup stats when device is created */
+static int geneve_init(struct net_device *dev)
+{
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void geneve_uninit(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+}
+
+static int geneve_open(struct net_device *dev)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct net *net = geneve->net;
+ struct geneve_net *gn = net_generic(geneve->net, geneve_net_id);
+ struct geneve_sock *gs;
+
+ gs = geneve_sock_add(net, htons(GENEVE_UDP_PORT), geneve_rx, gn,
+ false, false);
+ if (IS_ERR(gs))
+ return PTR_ERR(gs);
+
+ geneve->sock = gs;
+
+ return 0;
+}
+
+static int geneve_stop(struct net_device *dev)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct geneve_sock *gs = geneve->sock;
+
+ geneve_sock_release(gs);
+
+ return 0;
+}
+
+static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ struct geneve_sock *gs = geneve->sock;
+ struct rtable *rt = NULL;
+ const struct iphdr *iip; /* interior IP header */
+ struct flowi4 fl4;
+ int err;
+ __be16 sport;
+ __u8 tos, ttl;
+
+ iip = ip_hdr(skb);
+
+ skb_reset_mac_header(skb);
+
+ /* TODO: port min/max limits should be configurable */
+ sport = udp_flow_src_port(dev_net(dev), skb, 0, 0, true);
+
+ tos = geneve->tos;
+ if (tos == 1)
+ tos = ip_tunnel_get_dsfield(iip, skb);
+
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_tos = RT_TOS(tos);
+ fl4.daddr = geneve->remote.sin_addr.s_addr;
+ rt = ip_route_output_key(geneve->net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
+ if (rt->dst.dev == dev) { /* is this necessary? */
+ netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
+ dev->stats.collisions++;
+ goto rt_tx_error;
+ }
+
+ tos = ip_tunnel_ecn_encap(tos, iip, skb);
+
+ ttl = geneve->ttl;
+ if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
+ ttl = 1;
+
+ ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+ /* no need to handle local destination and encap bypass...yet... */
+
+ err = geneve_xmit_skb(gs, rt, skb, fl4.saddr, fl4.daddr,
+ tos, ttl, 0, sport, htons(GENEVE_UDP_PORT), 0,
+ geneve->vni, 0, NULL, false,
+ !net_eq(geneve->net, dev_net(geneve->dev)));
+ if (err < 0)
+ ip_rt_put(rt);
+
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+
+ return NETDEV_TX_OK;
+
+rt_tx_error:
+ ip_rt_put(rt);
+tx_error:
+ dev->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
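
On transmit, the outer UDP source port comes from udp_flow_src_port(), which hashes the inner flow so underlay ECMP/RSS can spread tunnelled traffic while keeping each flow on one path. A rough user-space sketch of that mapping (the hash input and the default ephemeral range here are assumptions; the real helper hashes the skb and consults the netns port range):

#include <stdint.h>
#include <stdio.h>

/* Map a per-flow hash into [min, max]; 0/0 selects a default
 * ephemeral range -- roughly what udp_flow_src_port() does. */
static uint16_t flow_src_port(uint32_t flow_hash, uint32_t min, uint32_t max)
{
	if (min == 0 && max == 0) {
		min = 32768;	/* assumed default range */
		max = 60999;
	}
	return (uint16_t)(min + (((uint64_t)flow_hash * (max - min + 1)) >> 32));
}

int main(void)
{
	printf("sport %u\n", flow_src_port(0xdeadbeefu, 0, 0));
	return 0;
}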
+
+static const struct net_device_ops geneve_netdev_ops = {
+ .ndo_init = geneve_init,
+ .ndo_uninit = geneve_uninit,
+ .ndo_open = geneve_open,
+ .ndo_stop = geneve_stop,
+ .ndo_start_xmit = geneve_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static void geneve_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
+ strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
+}
+
+static const struct ethtool_ops geneve_ethtool_ops = {
+ .get_drvinfo = geneve_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+/* Info for udev that this is a virtual tunnel endpoint */
+static struct device_type geneve_type = {
+ .name = "geneve",
+};
+
+/* Initialize the device structure. */
+static void geneve_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->netdev_ops = &geneve_netdev_ops;
+ dev->ethtool_ops = &geneve_ethtool_ops;
+ dev->destructor = free_netdev;
+
+ SET_NETDEV_DEVTYPE(dev, &geneve_type);
+
+ dev->tx_queue_len = 0;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+
+ dev->vlan_features = dev->features;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+
+ netif_keep_dst(dev);
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+}
+
+static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
+ [IFLA_GENEVE_ID] = { .type = NLA_U32 },
+ [IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_GENEVE_TTL] = { .type = NLA_U8 },
+ [IFLA_GENEVE_TOS] = { .type = NLA_U8 },
+};
+
+static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+
+ if (!data)
+ return -EINVAL;
+
+ if (data[IFLA_GENEVE_ID]) {
+ __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+
+ if (vni >= GENEVE_VID_MASK)
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int geneve_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+ struct geneve_dev *dummy, *geneve = netdev_priv(dev);
+ struct hlist_head *vni_list_head;
+ struct sockaddr_in remote; /* IPv4 address for link partner */
+ __u32 vni, hash;
+ int err;
+
+ if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
+ return -EINVAL;
+
+ geneve->net = net;
+ geneve->dev = dev;
+
+ vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+ geneve->vni[0] = (vni & 0x00ff0000) >> 16;
+ geneve->vni[1] = (vni & 0x0000ff00) >> 8;
+ geneve->vni[2] = vni & 0x000000ff;
+
+ geneve->remote.sin_addr.s_addr =
+ nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+ if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
+ return -EINVAL;
+
+ remote = geneve->remote;
+ hash = geneve_net_vni_hash(geneve->vni);
+ vni_list_head = &gn->vni_list[hash];
+ hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
+ if (!memcmp(geneve->vni, dummy->vni, sizeof(dummy->vni)) &&
+ !memcmp(&remote, &dummy->remote, sizeof(dummy->remote)))
+ return -EBUSY;
+ }
+
+ if (tb[IFLA_ADDRESS] == NULL)
+ eth_hw_addr_random(dev);
+
+ err = register_netdevice(dev);
+ if (err)
+ return err;
+
+ if (data[IFLA_GENEVE_TTL])
+ geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
+
+ if (data[IFLA_GENEVE_TOS])
+ geneve->tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+
+ list_add(&geneve->next, &gn->geneve_list);
+
+ hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]);
+
+ return 0;
+}
+
+static void geneve_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+
+ if (!hlist_unhashed(&geneve->hlist))
+ hlist_del_rcu(&geneve->hlist);
+
+ list_del(&geneve->next);
+ unregister_netdevice_queue(dev, head);
+}
+
+static size_t geneve_get_size(const struct net_device *dev)
+{
+ return nla_total_size(sizeof(__u32)) + /* IFLA_GENEVE_ID */
+ nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
+ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
+ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
+ 0;
+}
+
+static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct geneve_dev *geneve = netdev_priv(dev);
+ __u32 vni;
+
+ vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
+ if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
+ goto nla_put_failure;
+
+ if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
+ geneve->remote.sin_addr.s_addr))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
+ nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
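
geneve_newlink() splits the 32-bit IFLA_GENEVE_ID into the three-byte wire-order VNI, and geneve_fill_info() reassembles it for the netlink dump; a self-contained round-trip check of the two conversions:

#include <assert.h>
#include <stdint.h>

/* Pack a VNI (< 1 << 24) into wire order, as geneve_newlink() does. */
static void vni_pack(uint32_t vni, uint8_t out[3])
{
	out[0] = (vni & 0x00ff0000) >> 16;
	out[1] = (vni & 0x0000ff00) >> 8;
	out[2] = vni & 0x000000ff;
}

/* Reassemble it, as geneve_fill_info() does. */
static uint32_t vni_unpack(const uint8_t in[3])
{
	return (in[0] << 16) | (in[1] << 8) | in[2];
}

int main(void)
{
	uint8_t wire[3];

	vni_pack(42, wire);
	assert(vni_unpack(wire) == 42);
	return 0;
}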
+
+static struct rtnl_link_ops geneve_link_ops __read_mostly = {
+ .kind = "geneve",
+ .maxtype = IFLA_GENEVE_MAX,
+ .policy = geneve_policy,
+ .priv_size = sizeof(struct geneve_dev),
+ .setup = geneve_setup,
+ .validate = geneve_validate,
+ .newlink = geneve_newlink,
+ .dellink = geneve_dellink,
+ .get_size = geneve_get_size,
+ .fill_info = geneve_fill_info,
+};
+
+static __net_init int geneve_init_net(struct net *net)
+{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+ unsigned int h;
+
+ INIT_LIST_HEAD(&gn->geneve_list);
+
+ for (h = 0; h < VNI_HASH_SIZE; ++h)
+ INIT_HLIST_HEAD(&gn->vni_list[h]);
+
+ return 0;
+}
+
+static void __net_exit geneve_exit_net(struct net *net)
+{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+ struct geneve_dev *geneve, *next;
+ struct net_device *dev, *aux;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+
+ /* gather any geneve devices that were moved into this ns */
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == &geneve_link_ops)
+ unregister_netdevice_queue(dev, &list);
+
+ /* now gather any other geneve devices that were created in this ns */
+ list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
+ /* If geneve->dev is in the same netns, it was already added
+ * to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(geneve->dev), net))
+ unregister_netdevice_queue(geneve->dev, &list);
+ }
+
+ /* unregister the devices gathered above */
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
+static struct pernet_operations geneve_net_ops = {
+ .init = geneve_init_net,
+ .exit = geneve_exit_net,
+ .id = &geneve_net_id,
+ .size = sizeof(struct geneve_net),
+};
+
+static int __init geneve_init_module(void)
+{
+ int rc;
+
+ rc = register_pernet_subsys(&geneve_net_ops);
+ if (rc)
+ goto out1;
+
+ rc = rtnl_link_register(&geneve_link_ops);
+ if (rc)
+ goto out2;
+
+ return 0;
+out2:
+ unregister_pernet_subsys(&geneve_net_ops);
+out1:
+ return rc;
+}
+late_initcall(geneve_init_module);
+
+static void __exit geneve_cleanup_module(void)
+{
+ rtnl_link_unregister(&geneve_link_ops);
+ unregister_pernet_subsys(&geneve_net_ops);
+}
+module_exit(geneve_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(GENEVE_NETDEV_VER);
+MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
+MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("geneve");
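
With the rtnl_link_ops registered under the "geneve" kind, endpoints are managed through rtnetlink; assuming iproute2 support for the attributes above, creation would look something like "ip link add gnv0 type geneve id 42 remote 192.168.1.2" (hypothetical device name and address), which supplies the mandatory IFLA_GENEVE_ID and IFLA_GENEVE_REMOTE checked in geneve_newlink().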
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 41071d32bc8e..dd4544085db3 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -161,6 +161,7 @@ struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN];
bool link_state; /* 0 - link up, 1 - link down */
int ring_size;
+ u32 max_num_vrss_chns;
};
enum rndis_device_state {
@@ -611,6 +612,12 @@ struct multi_send_data {
u32 count; /* counter of batched packets */
};
+struct netvsc_stats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+};
+
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
@@ -618,6 +625,9 @@ struct net_device_context {
struct delayed_work dwork;
struct work_struct work;
u32 msg_enable; /* debug level */
+
+ struct netvsc_stats __percpu *tx_stats;
+ struct netvsc_stats __percpu *rx_stats;
};
/* Per netvsc device */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index ea091bc5ff09..06de98a05622 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -227,13 +227,18 @@ static int netvsc_init_buf(struct hv_device *device)
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
struct net_device *ndev;
+ int node;
net_device = get_outbound_net_device(device);
if (!net_device)
return -ENODEV;
ndev = net_device->ndev;
- net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+ node = cpu_to_node(device->channel->target_cpu);
+ net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
+ if (!net_device->recv_buf)
+ net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+
if (!net_device->recv_buf) {
netdev_err(ndev, "unable to allocate receive "
"buffer of size %d\n", net_device->recv_buf_size);
@@ -321,7 +326,9 @@ static int netvsc_init_buf(struct hv_device *device)
/* Now setup the send buffer.
*/
- net_device->send_buf = vzalloc(net_device->send_buf_size);
+ net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
+ if (!net_device->send_buf)
+ net_device->send_buf = vzalloc(net_device->send_buf_size);
if (!net_device->send_buf) {
netdev_err(ndev, "unable to allocate send "
"buffer of size %d\n", net_device->send_buf_size);
@@ -743,6 +750,7 @@ static inline int netvsc_send_pkt(
u64 req_id;
int ret;
struct hv_page_buffer *pgbuf;
+ u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (packet->is_data_pkt) {
@@ -769,32 +777,42 @@ static inline int netvsc_send_pkt(
if (out_channel->rescind)
return -ENODEV;
+ /*
+ * It is possible that once we successfully place this packet
+ * on the ringbuffer, we may stop the queue. In that case, we want
+ * to notify the host independent of the xmit_more flag. We don't
+ * need to be precise here; in the worst case we may signal the host
+ * unnecessarily.
+ */
+ if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
+ packet->xmit_more = false;
+
if (packet->page_buf_cnt) {
pgbuf = packet->cp_partial ? packet->page_buf +
packet->rmsg_pgcnt : packet->page_buf;
- ret = vmbus_sendpacket_pagebuffer(out_channel,
- pgbuf,
- packet->page_buf_cnt,
- &nvmsg,
- sizeof(struct nvsp_message),
- req_id);
+ ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
+ pgbuf,
+ packet->page_buf_cnt,
+ &nvmsg,
+ sizeof(struct nvsp_message),
+ req_id,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+ !packet->xmit_more);
} else {
- ret = vmbus_sendpacket(
- out_channel, &nvmsg,
- sizeof(struct nvsp_message),
- req_id,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
+ sizeof(struct nvsp_message),
+ req_id,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+ !packet->xmit_more);
}
if (ret == 0) {
atomic_inc(&net_device->num_outstanding_sends);
atomic_inc(&net_device->queue_sends[q_idx]);
- if (hv_ringbuf_avail_percent(&out_channel->outbound) <
- RING_AVAIL_PERCENT_LOWATER) {
- netif_tx_stop_queue(netdev_get_tx_queue(
- ndev, q_idx));
+ if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+ netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
if (atomic_read(&net_device->
queue_sends[q_idx]) < 1)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5993c7e2d723..358475ed9b59 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -46,6 +46,8 @@ static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+static int max_num_vrss_chns = 8;
+
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@@ -196,12 +198,12 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
struct flow_keys flow;
int data_len;
- if (!skb_flow_dissect(skb, &flow) ||
- !(flow.n_proto == htons(ETH_P_IP) ||
- flow.n_proto == htons(ETH_P_IPV6)))
+ if (!skb_flow_dissect_flow_keys(skb, &flow) ||
+ !(flow.basic.n_proto == htons(ETH_P_IP) ||
+ flow.basic.n_proto == htons(ETH_P_IPV6)))
return false;
- if (flow.ip_proto == IPPROTO_TCP)
+ if (flow.basic.ip_proto == IPPROTO_TCP)
data_len = 12;
else
data_len = 8;
@@ -391,7 +393,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
u32 skb_length;
u32 pkt_sz;
struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
-
+ struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
/* We will atmost need two pages to describe the rndis
* header. We can only transmit MAX_PAGE_BUFFER_COUNT number
@@ -580,8 +582,10 @@ do_send:
drop:
if (ret == 0) {
- net->stats.tx_bytes += skb_length;
- net->stats.tx_packets++;
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->packets++;
+ tx_stats->bytes += skb_length;
+ u64_stats_update_end(&tx_stats->syncp);
} else {
if (ret != -EAGAIN) {
dev_kfree_skb_any(skb);
@@ -644,13 +648,17 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct ndis_tcp_ip_checksum_info *csum_info)
{
struct net_device *net;
+ struct net_device_context *net_device_ctx;
struct sk_buff *skb;
+ struct netvsc_stats *rx_stats;
net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
if (!net || net->reg_state != NETREG_REGISTERED) {
packet->status = NVSP_STAT_FAIL;
return 0;
}
+ net_device_ctx = netdev_priv(net);
+ rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
/* Allocate a skb - TODO direct I/O to pages? */
skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
@@ -686,8 +694,10 @@ int netvsc_recv_callback(struct hv_device *device_obj,
skb_record_rx_queue(skb, packet->channel->
offermsg.offer.sub_channel_index);
- net->stats.rx_packets++;
- net->stats.rx_bytes += packet->total_data_buflen;
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += packet->total_data_buflen;
+ u64_stats_update_end(&rx_stats->syncp);
/*
* Pass the skb back up. Network stack will deallocate the skb when it
@@ -747,12 +757,53 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
ndevctx->device_ctx = hdev;
hv_set_drvdata(hdev, ndev);
device_info.ring_size = ring_size;
+ device_info.max_num_vrss_chns = max_num_vrss_chns;
rndis_filter_device_add(hdev, &device_info);
netif_tx_wake_all_queues(ndev);
return 0;
}
+static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
+ struct rtnl_link_stats64 *t)
+{
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
+ cpu);
+ struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
+ cpu);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ tx_packets = tx_stats->packets;
+ tx_bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ rx_packets = rx_stats->packets;
+ rx_bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+
+ t->tx_bytes += tx_bytes;
+ t->tx_packets += tx_packets;
+ t->rx_bytes += rx_bytes;
+ t->rx_packets += rx_packets;
+ }
+
+ t->tx_dropped = net->stats.tx_dropped;
+ t->tx_errors = net->stats.tx_errors;
+
+ t->rx_dropped = net->stats.rx_dropped;
+ t->rx_errors = net->stats.rx_errors;
+
+ return t;
+}
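
The u64_stats_fetch_begin_irq()/retry loop is a seqcount read: the writer makes the sequence odd for the duration of an update and the reader retries if it saw an odd or changed value, so the 64-bit counters read consistently even on 32-bit hosts. A simplified single-writer sketch of the idea (memory barriers, which the kernel helpers supply, are omitted):

#include <stdint.h>

struct pcpu_stats {
	uint64_t packets;
	uint64_t bytes;
	unsigned int seq;	/* odd while an update is in flight */
};

static void stats_add(struct pcpu_stats *s, uint64_t bytes)
{
	s->seq++;		/* begin: sequence becomes odd */
	s->packets++;
	s->bytes += bytes;
	s->seq++;		/* end: sequence becomes even */
}

static void stats_read(const struct pcpu_stats *s,
		       uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;
		*packets = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) || start != s->seq);	/* torn read: retry */
}

int main(void)
{
	struct pcpu_stats s = { 0, 0, 0 };
	uint64_t p, b;

	stats_add(&s, 1500);
	stats_read(&s, &p, &b);
	return !(p == 1 && b == 1500);
}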
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
@@ -804,6 +855,7 @@ static const struct net_device_ops device_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
.ndo_select_queue = netvsc_select_queue,
+ .ndo_get_stats64 = netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = netvsc_poll_controller,
#endif
@@ -855,6 +907,14 @@ static void netvsc_link_change(struct work_struct *w)
netdev_notify_peers(net);
}
+static void netvsc_free_netdev(struct net_device *netdev)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(netdev);
+
+ free_percpu(net_device_ctx->tx_stats);
+ free_percpu(net_device_ctx->rx_stats);
+ free_netdev(netdev);
+}
static int netvsc_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
@@ -883,6 +943,18 @@ static int netvsc_probe(struct hv_device *dev,
netdev_dbg(net, "netvsc msg_enable: %d\n",
net_device_ctx->msg_enable);
+ net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+ if (!net_device_ctx->tx_stats) {
+ free_netdev(net);
+ return -ENOMEM;
+ }
+ net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+ if (!net_device_ctx->rx_stats) {
+ free_percpu(net_device_ctx->tx_stats);
+ free_netdev(net);
+ return -ENOMEM;
+ }
+
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
@@ -906,10 +978,11 @@ static int netvsc_probe(struct hv_device *dev,
/* Notify the netvsc driver of the new device */
device_info.ring_size = ring_size;
+ device_info.max_num_vrss_chns = max_num_vrss_chns;
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- free_netdev(net);
+ netvsc_free_netdev(net);
hv_set_drvdata(dev, NULL);
return ret;
}
@@ -923,7 +996,7 @@ static int netvsc_probe(struct hv_device *dev,
if (ret != 0) {
pr_err("Unable to register netdev.\n");
rndis_filter_device_remove(dev);
- free_netdev(net);
+ netvsc_free_netdev(net);
} else {
schedule_delayed_work(&net_device_ctx->dwork, 0);
}
@@ -962,7 +1035,7 @@ static int netvsc_remove(struct hv_device *dev)
*/
rndis_filter_device_remove(dev);
- free_netdev(net);
+ netvsc_free_netdev(net);
return 0;
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 9118cea91882..006c1b8c2385 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1013,6 +1013,9 @@ int rndis_filter_device_add(struct hv_device *dev,
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
+ u32 num_rss_qs;
+ const struct cpumask *node_cpu_mask;
+ u32 num_possible_rss_qs;
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -1100,9 +1103,18 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret || rsscap.num_recv_que < 2)
goto out;
+ num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que);
+
net_device->max_chn = rsscap.num_recv_que;
- net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
- num_online_cpus() : rsscap.num_recv_que;
+
+ /*
+ * We will limit the VRSS channels to the number of CPUs in the NUMA node
+ * the primary channel is currently bound to.
+ */
+ node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
+ num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+ net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
if (net_device->num_chn == 1)
goto out;
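
The resulting channel count is the minimum of three limits: what the host offers (rsscap.num_recv_que), the driver's cap (max_num_vrss_chns), and the CPU count of the NUMA node the primary channel is bound to. A trivial sketch of that policy:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* min(host offer, module cap, CPUs in the primary channel's node) */
static unsigned int vrss_channels(unsigned int host_queues,
				  unsigned int module_cap,
				  unsigned int node_cpus)
{
	return min_u(node_cpus, min_u(module_cap, host_queues));
}

int main(void)
{
	/* e.g. host offers 16 queues, cap is 8, node has 12 CPUs -> 8 */
	printf("%u\n", vrss_channels(16, 8, 12));
	return 0;
}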
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 1a3c3e57aa0b..1dd5ab8e5054 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -53,3 +53,13 @@ config IEEE802154_CC2520
This driver can also be built as a module. To do so, say M here.
The module will be called 'cc2520'.
+
+config IEEE802154_ATUSB
+ tristate "ATUSB transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154 && USB
+ ---help---
+ Say Y here to enable the ATUSB IEEE 802.15.4 wireless
+ controller.
+
+ This driver can also be built as a module. To do so say M here.
+ The module will be called 'atusb'.
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index d77fa4d77e27..cf1d2a6db023 100644
--- a/drivers/net/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o
+obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 67d00fbc2e0e..2f25a5ed8247 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -35,6 +35,8 @@
#include <net/mac802154.h>
#include <net/cfg802154.h>
+#include "at86rf230.h"
+
struct at86rf230_local;
/* at86rf2xx chip depend data.
* All timings are in us.
@@ -50,7 +52,7 @@ struct at86rf2xx_chip_data {
int rssi_base_val;
int (*set_channel)(struct at86rf230_local *, u8, u8);
- int (*get_desense_steps)(struct at86rf230_local *, s32);
+ int (*set_txpower)(struct at86rf230_local *, s32);
};
#define AT86RF2XX_MAX_BUF (127 + 3)
@@ -102,200 +104,6 @@ struct at86rf230_local {
struct at86rf230_state_change tx;
};
-#define RG_TRX_STATUS (0x01)
-#define SR_TRX_STATUS 0x01, 0x1f, 0
-#define SR_RESERVED_01_3 0x01, 0x20, 5
-#define SR_CCA_STATUS 0x01, 0x40, 6
-#define SR_CCA_DONE 0x01, 0x80, 7
-#define RG_TRX_STATE (0x02)
-#define SR_TRX_CMD 0x02, 0x1f, 0
-#define SR_TRAC_STATUS 0x02, 0xe0, 5
-#define RG_TRX_CTRL_0 (0x03)
-#define SR_CLKM_CTRL 0x03, 0x07, 0
-#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
-#define SR_PAD_IO_CLKM 0x03, 0x30, 4
-#define SR_PAD_IO 0x03, 0xc0, 6
-#define RG_TRX_CTRL_1 (0x04)
-#define SR_IRQ_POLARITY 0x04, 0x01, 0
-#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
-#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
-#define SR_RX_BL_CTRL 0x04, 0x10, 4
-#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
-#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
-#define SR_PA_EXT_EN 0x04, 0x80, 7
-#define RG_PHY_TX_PWR (0x05)
-#define SR_TX_PWR 0x05, 0x0f, 0
-#define SR_PA_LT 0x05, 0x30, 4
-#define SR_PA_BUF_LT 0x05, 0xc0, 6
-#define RG_PHY_RSSI (0x06)
-#define SR_RSSI 0x06, 0x1f, 0
-#define SR_RND_VALUE 0x06, 0x60, 5
-#define SR_RX_CRC_VALID 0x06, 0x80, 7
-#define RG_PHY_ED_LEVEL (0x07)
-#define SR_ED_LEVEL 0x07, 0xff, 0
-#define RG_PHY_CC_CCA (0x08)
-#define SR_CHANNEL 0x08, 0x1f, 0
-#define SR_CCA_MODE 0x08, 0x60, 5
-#define SR_CCA_REQUEST 0x08, 0x80, 7
-#define RG_CCA_THRES (0x09)
-#define SR_CCA_ED_THRES 0x09, 0x0f, 0
-#define SR_RESERVED_09_1 0x09, 0xf0, 4
-#define RG_RX_CTRL (0x0a)
-#define SR_PDT_THRES 0x0a, 0x0f, 0
-#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
-#define RG_SFD_VALUE (0x0b)
-#define SR_SFD_VALUE 0x0b, 0xff, 0
-#define RG_TRX_CTRL_2 (0x0c)
-#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
-#define SR_SUB_MODE 0x0c, 0x04, 2
-#define SR_BPSK_QPSK 0x0c, 0x08, 3
-#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
-#define SR_RESERVED_0c_5 0x0c, 0x60, 5
-#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
-#define RG_ANT_DIV (0x0d)
-#define SR_ANT_CTRL 0x0d, 0x03, 0
-#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
-#define SR_ANT_DIV_EN 0x0d, 0x08, 3
-#define SR_RESERVED_0d_2 0x0d, 0x70, 4
-#define SR_ANT_SEL 0x0d, 0x80, 7
-#define RG_IRQ_MASK (0x0e)
-#define SR_IRQ_MASK 0x0e, 0xff, 0
-#define RG_IRQ_STATUS (0x0f)
-#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
-#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
-#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
-#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
-#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
-#define SR_IRQ_5_AMI 0x0f, 0x20, 5
-#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
-#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
-#define RG_VREG_CTRL (0x10)
-#define SR_RESERVED_10_6 0x10, 0x03, 0
-#define SR_DVDD_OK 0x10, 0x04, 2
-#define SR_DVREG_EXT 0x10, 0x08, 3
-#define SR_RESERVED_10_3 0x10, 0x30, 4
-#define SR_AVDD_OK 0x10, 0x40, 6
-#define SR_AVREG_EXT 0x10, 0x80, 7
-#define RG_BATMON (0x11)
-#define SR_BATMON_VTH 0x11, 0x0f, 0
-#define SR_BATMON_HR 0x11, 0x10, 4
-#define SR_BATMON_OK 0x11, 0x20, 5
-#define SR_RESERVED_11_1 0x11, 0xc0, 6
-#define RG_XOSC_CTRL (0x12)
-#define SR_XTAL_TRIM 0x12, 0x0f, 0
-#define SR_XTAL_MODE 0x12, 0xf0, 4
-#define RG_RX_SYN (0x15)
-#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
-#define SR_RESERVED_15_2 0x15, 0x70, 4
-#define SR_RX_PDT_DIS 0x15, 0x80, 7
-#define RG_XAH_CTRL_1 (0x17)
-#define SR_RESERVED_17_8 0x17, 0x01, 0
-#define SR_AACK_PROM_MODE 0x17, 0x02, 1
-#define SR_AACK_ACK_TIME 0x17, 0x04, 2
-#define SR_RESERVED_17_5 0x17, 0x08, 3
-#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
-#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
-#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
-#define SR_RESERVED_17_1 0x17, 0x80, 7
-#define RG_FTN_CTRL (0x18)
-#define SR_RESERVED_18_2 0x18, 0x7f, 0
-#define SR_FTN_START 0x18, 0x80, 7
-#define RG_PLL_CF (0x1a)
-#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
-#define SR_PLL_CF_START 0x1a, 0x80, 7
-#define RG_PLL_DCU (0x1b)
-#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
-#define SR_RESERVED_1b_2 0x1b, 0x40, 6
-#define SR_PLL_DCU_START 0x1b, 0x80, 7
-#define RG_PART_NUM (0x1c)
-#define SR_PART_NUM 0x1c, 0xff, 0
-#define RG_VERSION_NUM (0x1d)
-#define SR_VERSION_NUM 0x1d, 0xff, 0
-#define RG_MAN_ID_0 (0x1e)
-#define SR_MAN_ID_0 0x1e, 0xff, 0
-#define RG_MAN_ID_1 (0x1f)
-#define SR_MAN_ID_1 0x1f, 0xff, 0
-#define RG_SHORT_ADDR_0 (0x20)
-#define SR_SHORT_ADDR_0 0x20, 0xff, 0
-#define RG_SHORT_ADDR_1 (0x21)
-#define SR_SHORT_ADDR_1 0x21, 0xff, 0
-#define RG_PAN_ID_0 (0x22)
-#define SR_PAN_ID_0 0x22, 0xff, 0
-#define RG_PAN_ID_1 (0x23)
-#define SR_PAN_ID_1 0x23, 0xff, 0
-#define RG_IEEE_ADDR_0 (0x24)
-#define SR_IEEE_ADDR_0 0x24, 0xff, 0
-#define RG_IEEE_ADDR_1 (0x25)
-#define SR_IEEE_ADDR_1 0x25, 0xff, 0
-#define RG_IEEE_ADDR_2 (0x26)
-#define SR_IEEE_ADDR_2 0x26, 0xff, 0
-#define RG_IEEE_ADDR_3 (0x27)
-#define SR_IEEE_ADDR_3 0x27, 0xff, 0
-#define RG_IEEE_ADDR_4 (0x28)
-#define SR_IEEE_ADDR_4 0x28, 0xff, 0
-#define RG_IEEE_ADDR_5 (0x29)
-#define SR_IEEE_ADDR_5 0x29, 0xff, 0
-#define RG_IEEE_ADDR_6 (0x2a)
-#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
-#define RG_IEEE_ADDR_7 (0x2b)
-#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
-#define RG_XAH_CTRL_0 (0x2c)
-#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
-#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
-#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
-#define RG_CSMA_SEED_0 (0x2d)
-#define SR_CSMA_SEED_0 0x2d, 0xff, 0
-#define RG_CSMA_SEED_1 (0x2e)
-#define SR_CSMA_SEED_1 0x2e, 0x07, 0
-#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
-#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
-#define SR_AACK_SET_PD 0x2e, 0x20, 5
-#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
-#define RG_CSMA_BE (0x2f)
-#define SR_MIN_BE 0x2f, 0x0f, 0
-#define SR_MAX_BE 0x2f, 0xf0, 4
-
-#define CMD_REG 0x80
-#define CMD_REG_MASK 0x3f
-#define CMD_WRITE 0x40
-#define CMD_FB 0x20
-
-#define IRQ_BAT_LOW (1 << 7)
-#define IRQ_TRX_UR (1 << 6)
-#define IRQ_AMI (1 << 5)
-#define IRQ_CCA_ED (1 << 4)
-#define IRQ_TRX_END (1 << 3)
-#define IRQ_RX_START (1 << 2)
-#define IRQ_PLL_UNL (1 << 1)
-#define IRQ_PLL_LOCK (1 << 0)
-
-#define IRQ_ACTIVE_HIGH 0
-#define IRQ_ACTIVE_LOW 1
-
-#define STATE_P_ON 0x00 /* BUSY */
-#define STATE_BUSY_RX 0x01
-#define STATE_BUSY_TX 0x02
-#define STATE_FORCE_TRX_OFF 0x03
-#define STATE_FORCE_TX_ON 0x04 /* IDLE */
-/* 0x05 */ /* INVALID_PARAMETER */
-#define STATE_RX_ON 0x06
-/* 0x07 */ /* SUCCESS */
-#define STATE_TRX_OFF 0x08
-#define STATE_TX_ON 0x09
-/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
-#define STATE_SLEEP 0x0F
-#define STATE_PREP_DEEP_SLEEP 0x10
-#define STATE_BUSY_RX_AACK 0x11
-#define STATE_BUSY_TX_ARET 0x12
-#define STATE_RX_AACK_ON 0x16
-#define STATE_TX_ARET_ON 0x19
-#define STATE_RX_ON_NOCLK 0x1C
-#define STATE_RX_AACK_ON_NOCLK 0x1D
-#define STATE_BUSY_RX_AACK_NOCLK 0x1E
-#define STATE_TRANSITION_IN_PROGRESS 0x1F
-
-#define TRX_STATE_MASK (0x1F)
-
#define AT86RF2XX_NUMREGS 0x3F
static void
@@ -1010,7 +818,7 @@ at86rf230_xmit_start(void *context)
if (lp->is_tx_from_off) {
lp->is_tx_from_off = false;
at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
- at86rf230_xmit_tx_on,
+ at86rf230_write_frame,
false);
} else {
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
@@ -1076,6 +884,50 @@ at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
}
+#define AT86RF2XX_MAX_ED_LEVELS 0xF
+static const s32 at86rf23x_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
+ -7100, -6900, -6700, -6500, -6300, -6100,
+};
+
+static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200,
+ -8000, -7800, -7600, -7400, -7200, -7000,
+};
+
+static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000,
+ -7800, -7600, -7400, -7200, -7000, -6800,
+};
+
+static inline int
+at86rf212_update_cca_ed_level(struct at86rf230_local *lp, int rssi_base_val)
+{
+ unsigned int cca_ed_thres;
+ int rc;
+
+ rc = at86rf230_read_subreg(lp, SR_CCA_ED_THRES, &cca_ed_thres);
+ if (rc < 0)
+ return rc;
+
+ switch (rssi_base_val) {
+ case -98:
+ lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98);
+ lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres];
+ break;
+ case -100:
+ lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
+ lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres];
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return 0;
+}
+
static int
at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
{
@@ -1098,6 +950,10 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
if (rc < 0)
return rc;
+ rc = at86rf212_update_cca_ed_level(lp, lp->data->rssi_base_val);
+ if (rc < 0)
+ return rc;
+
/* This sets the symbol_duration according to the frequency on the 212.
* TODO: move this handling into the channel and page setting in cfg802154.
* We can do that; these timings follow the 802.15.4 standard.
@@ -1193,23 +1049,56 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
return 0;
}
+#define AT86RF23X_MAX_TX_POWERS 0xF
+static const s32 at86rf233_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+ 400, 370, 340, 300, 250, 200, 100, 0, -100, -200, -300, -400, -600,
+ -800, -1200, -1700,
+};
+
+static const s32 at86rf231_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+ 300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700,
+ -900, -1200, -1700,
+};
+
+#define AT86RF212_MAX_TX_POWERS 0x1F
+static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = {
+ 500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700,
+ -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700,
+ -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600,
+};
+
+static int
+at86rf23x_set_txpower(struct at86rf230_local *lp, s32 mbm)
+{
+ u32 i;
+
+ for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+ if (lp->hw->phy->supported.tx_powers[i] == mbm)
+ return at86rf230_write_subreg(lp, SR_TX_PWR_23X, i);
+ }
+
+ return -EINVAL;
+}
+
static int
-at86rf230_set_txpower(struct ieee802154_hw *hw, s8 db)
+at86rf212_set_txpower(struct at86rf230_local *lp, s32 mbm)
{
- struct at86rf230_local *lp = hw->priv;
+ u32 i;
- /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five
- * bits decrease power in 1dB steps. 0x60 represents extra PA gain of
- * 0dB.
- * thus, supported values for db range from -26 to 5, for 31dB of
- * reduction to 0dB of reduction.
- */
- if (db > 5 || db < -26)
- return -EINVAL;
+ for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+ if (lp->hw->phy->supported.tx_powers[i] == mbm)
+ return at86rf230_write_subreg(lp, SR_TX_PWR_212, i);
+ }
- db = -(db - 5);
+ return -EINVAL;
+}
+
+static int
+at86rf230_set_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+ struct at86rf230_local *lp = hw->priv;
- return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
+ return lp->data->set_txpower(lp, mbm);
}
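
Where the old callback computed a register value arithmetically from a dB argument, the new per-chip callbacks search the advertised table of supported powers (in mbm, hundredths of a dBm) for an exact match and write the matching table index to the power register. The lookup, distilled (the values mirror the at86rf233 table above; -1 stands in for -EINVAL):

#include <stdio.h>

static const int powers[] = {	/* mbm; the index is the register value */
	400, 370, 340, 300, 250, 200, 100, 0, -100, -200, -300, -400,
	-600, -800, -1200, -1700,
};

static int txpower_to_regval(int mbm)
{
	unsigned int i;

	for (i = 0; i < sizeof(powers) / sizeof(powers[0]); i++)
		if (powers[i] == mbm)
			return (int)i;
	return -1;	/* unsupported power */
}

int main(void)
{
	printf("reg index for -1200 mbm: %d\n", txpower_to_regval(-1200));
	return 0;
}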
static int
@@ -1254,28 +1143,19 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
}
-static int
-at86rf212_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
- return (level - lp->data->rssi_base_val) * 100 / 207;
-}
-
-static int
-at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
- return (level - lp->data->rssi_base_val) / 2;
-}
static int
-at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 level)
+at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
struct at86rf230_local *lp = hw->priv;
+ u32 i;
- if (level < lp->data->rssi_base_val || level > 30)
- return -EINVAL;
+ for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
+ if (hw->phy->supported.cca_ed_levels[i] == mbm)
+ return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, i);
+ }
- return at86rf230_write_subreg(lp, SR_CCA_ED_THRES,
- lp->data->get_desense_steps(lp, level));
+ return -EINVAL;
}
static int
@@ -1365,7 +1245,7 @@ static struct at86rf2xx_chip_data at86rf233_data = {
.t_p_ack = 545,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
- .get_desense_steps = at86rf23x_get_desens_steps
+ .set_txpower = at86rf23x_set_txpower,
};
static struct at86rf2xx_chip_data at86rf231_data = {
@@ -1378,7 +1258,7 @@ static struct at86rf2xx_chip_data at86rf231_data = {
.t_p_ack = 545,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
- .get_desense_steps = at86rf23x_get_desens_steps
+ .set_txpower = at86rf23x_set_txpower,
};
static struct at86rf2xx_chip_data at86rf212_data = {
@@ -1391,7 +1271,7 @@ static struct at86rf2xx_chip_data at86rf212_data = {
.t_p_ack = 545,
.rssi_base_val = -100,
.set_channel = at86rf212_set_channel,
- .get_desense_steps = at86rf212_get_desens_steps
+ .set_txpower = at86rf212_set_txpower,
};
static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
@@ -1564,8 +1444,21 @@ at86rf230_detect_device(struct at86rf230_local *lp)
}
lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
- IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
- IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
+ IEEE802154_HW_CSMA_PARAMS |
+ IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT |
+ IEEE802154_HW_PROMISCUOUS;
+
+ lp->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER |
+ WPAN_PHY_FLAG_CCA_ED_LEVEL |
+ WPAN_PHY_FLAG_CCA_MODE;
+
+ lp->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+ BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
+ lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+ BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+ lp->hw->phy->supported.cca_ed_levels = at86rf23x_ed_levels;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf23x_ed_levels);
lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
@@ -1573,36 +1466,49 @@ at86rf230_detect_device(struct at86rf230_local *lp)
case 2:
chip = "at86rf230";
rc = -ENOTSUPP;
- break;
+ goto not_supp;
case 3:
chip = "at86rf231";
lp->data = &at86rf231_data;
- lp->hw->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->supported.channels[0] = 0x7FFF800;
lp->hw->phy->current_channel = 11;
lp->hw->phy->symbol_duration = 16;
+ lp->hw->phy->supported.tx_powers = at86rf231_powers;
+ lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers);
break;
case 7:
chip = "at86rf212";
lp->data = &at86rf212_data;
lp->hw->flags |= IEEE802154_HW_LBT;
- lp->hw->phy->channels_supported[0] = 0x00007FF;
- lp->hw->phy->channels_supported[2] = 0x00007FF;
+ lp->hw->phy->supported.channels[0] = 0x00007FF;
+ lp->hw->phy->supported.channels[2] = 0x00007FF;
lp->hw->phy->current_channel = 5;
lp->hw->phy->symbol_duration = 25;
+ lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
+ lp->hw->phy->supported.tx_powers = at86rf212_powers;
+ lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
+ lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
break;
case 11:
chip = "at86rf233";
lp->data = &at86rf233_data;
- lp->hw->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->supported.channels[0] = 0x7FFF800;
lp->hw->phy->current_channel = 13;
lp->hw->phy->symbol_duration = 16;
+ lp->hw->phy->supported.tx_powers = at86rf233_powers;
+ lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers);
break;
default:
chip = "unknown";
rc = -ENOTSUPP;
- break;
+ goto not_supp;
}
+ lp->hw->phy->cca_ed_level = lp->hw->phy->supported.cca_ed_levels[7];
+ lp->hw->phy->transmit_power = lp->hw->phy->supported.tx_powers[0];
+
+not_supp:
dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version);
return rc;
diff --git a/drivers/net/ieee802154/at86rf230.h b/drivers/net/ieee802154/at86rf230.h
new file mode 100644
index 000000000000..1e6d1cc677f6
--- /dev/null
+++ b/drivers/net/ieee802154/at86rf230.h
@@ -0,0 +1,220 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#ifndef _AT86RF230_H
+#define _AT86RF230_H
+
+#define RG_TRX_STATUS (0x01)
+#define SR_TRX_STATUS 0x01, 0x1f, 0
+#define SR_RESERVED_01_3 0x01, 0x20, 5
+#define SR_CCA_STATUS 0x01, 0x40, 6
+#define SR_CCA_DONE 0x01, 0x80, 7
+#define RG_TRX_STATE (0x02)
+#define SR_TRX_CMD 0x02, 0x1f, 0
+#define SR_TRAC_STATUS 0x02, 0xe0, 5
+#define RG_TRX_CTRL_0 (0x03)
+#define SR_CLKM_CTRL 0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
+#define SR_PAD_IO_CLKM 0x03, 0x30, 4
+#define SR_PAD_IO 0x03, 0xc0, 6
+#define RG_TRX_CTRL_1 (0x04)
+#define SR_IRQ_POLARITY 0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
+#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
+#define SR_RX_BL_CTRL 0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
+#define SR_PA_EXT_EN 0x04, 0x80, 7
+#define RG_PHY_TX_PWR (0x05)
+#define SR_TX_PWR_23X 0x05, 0x0f, 0
+#define SR_PA_LT_230 0x05, 0x30, 4
+#define SR_PA_BUF_LT_230 0x05, 0xc0, 6
+#define SR_TX_PWR_212 0x05, 0x1f, 0
+#define SR_GC_PA_212 0x05, 0x60, 5
+#define SR_PA_BOOST_LT_212 0x05, 0x80, 7
+#define RG_PHY_RSSI (0x06)
+#define SR_RSSI 0x06, 0x1f, 0
+#define SR_RND_VALUE 0x06, 0x60, 5
+#define SR_RX_CRC_VALID 0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL (0x07)
+#define SR_ED_LEVEL 0x07, 0xff, 0
+#define RG_PHY_CC_CCA (0x08)
+#define SR_CHANNEL 0x08, 0x1f, 0
+#define SR_CCA_MODE 0x08, 0x60, 5
+#define SR_CCA_REQUEST 0x08, 0x80, 7
+#define RG_CCA_THRES (0x09)
+#define SR_CCA_ED_THRES 0x09, 0x0f, 0
+#define SR_RESERVED_09_1 0x09, 0xf0, 4
+#define RG_RX_CTRL (0x0a)
+#define SR_PDT_THRES 0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
+#define RG_SFD_VALUE (0x0b)
+#define SR_SFD_VALUE 0x0b, 0xff, 0
+#define RG_TRX_CTRL_2 (0x0c)
+#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
+#define SR_SUB_MODE 0x0c, 0x04, 2
+#define SR_BPSK_QPSK 0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
+#define SR_RESERVED_0c_5 0x0c, 0x60, 5
+#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
+#define RG_ANT_DIV (0x0d)
+#define SR_ANT_CTRL 0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
+#define SR_ANT_DIV_EN 0x0d, 0x08, 3
+#define SR_RESERVED_0d_2 0x0d, 0x70, 4
+#define SR_ANT_SEL 0x0d, 0x80, 7
+#define RG_IRQ_MASK (0x0e)
+#define SR_IRQ_MASK 0x0e, 0xff, 0
+#define RG_IRQ_STATUS (0x0f)
+#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
+#define SR_IRQ_5_AMI 0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
+#define RG_VREG_CTRL (0x10)
+#define SR_RESERVED_10_6 0x10, 0x03, 0
+#define SR_DVDD_OK 0x10, 0x04, 2
+#define SR_DVREG_EXT 0x10, 0x08, 3
+#define SR_RESERVED_10_3 0x10, 0x30, 4
+#define SR_AVDD_OK 0x10, 0x40, 6
+#define SR_AVREG_EXT 0x10, 0x80, 7
+#define RG_BATMON (0x11)
+#define SR_BATMON_VTH 0x11, 0x0f, 0
+#define SR_BATMON_HR 0x11, 0x10, 4
+#define SR_BATMON_OK 0x11, 0x20, 5
+#define SR_RESERVED_11_1 0x11, 0xc0, 6
+#define RG_XOSC_CTRL (0x12)
+#define SR_XTAL_TRIM 0x12, 0x0f, 0
+#define SR_XTAL_MODE 0x12, 0xf0, 4
+#define RG_RX_SYN (0x15)
+#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
+#define SR_RESERVED_15_2 0x15, 0x70, 4
+#define SR_RX_PDT_DIS 0x15, 0x80, 7
+#define RG_XAH_CTRL_1 (0x17)
+#define SR_RESERVED_17_8 0x17, 0x01, 0
+#define SR_AACK_PROM_MODE 0x17, 0x02, 1
+#define SR_AACK_ACK_TIME 0x17, 0x04, 2
+#define SR_RESERVED_17_5 0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
+#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
+#define SR_RESERVED_17_1 0x17, 0x80, 7
+#define RG_FTN_CTRL (0x18)
+#define SR_RESERVED_18_2 0x18, 0x7f, 0
+#define SR_FTN_START 0x18, 0x80, 7
+#define RG_PLL_CF (0x1a)
+#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
+#define SR_PLL_CF_START 0x1a, 0x80, 7
+#define RG_PLL_DCU (0x1b)
+#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2 0x1b, 0x40, 6
+#define SR_PLL_DCU_START 0x1b, 0x80, 7
+#define RG_PART_NUM (0x1c)
+#define SR_PART_NUM 0x1c, 0xff, 0
+#define RG_VERSION_NUM (0x1d)
+#define SR_VERSION_NUM 0x1d, 0xff, 0
+#define RG_MAN_ID_0 (0x1e)
+#define SR_MAN_ID_0 0x1e, 0xff, 0
+#define RG_MAN_ID_1 (0x1f)
+#define SR_MAN_ID_1 0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0 (0x20)
+#define SR_SHORT_ADDR_0 0x20, 0xff, 0
+#define RG_SHORT_ADDR_1 (0x21)
+#define SR_SHORT_ADDR_1 0x21, 0xff, 0
+#define RG_PAN_ID_0 (0x22)
+#define SR_PAN_ID_0 0x22, 0xff, 0
+#define RG_PAN_ID_1 (0x23)
+#define SR_PAN_ID_1 0x23, 0xff, 0
+#define RG_IEEE_ADDR_0 (0x24)
+#define SR_IEEE_ADDR_0 0x24, 0xff, 0
+#define RG_IEEE_ADDR_1 (0x25)
+#define SR_IEEE_ADDR_1 0x25, 0xff, 0
+#define RG_IEEE_ADDR_2 (0x26)
+#define SR_IEEE_ADDR_2 0x26, 0xff, 0
+#define RG_IEEE_ADDR_3 (0x27)
+#define SR_IEEE_ADDR_3 0x27, 0xff, 0
+#define RG_IEEE_ADDR_4 (0x28)
+#define SR_IEEE_ADDR_4 0x28, 0xff, 0
+#define RG_IEEE_ADDR_5 (0x29)
+#define SR_IEEE_ADDR_5 0x29, 0xff, 0
+#define RG_IEEE_ADDR_6 (0x2a)
+#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7 (0x2b)
+#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
+#define RG_XAH_CTRL_0 (0x2c)
+#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0 (0x2d)
+#define SR_CSMA_SEED_0 0x2d, 0xff, 0
+#define RG_CSMA_SEED_1 (0x2e)
+#define SR_CSMA_SEED_1 0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
+#define SR_AACK_SET_PD 0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
+#define RG_CSMA_BE (0x2f)
+#define SR_MIN_BE 0x2f, 0x0f, 0
+#define SR_MAX_BE 0x2f, 0xf0, 4
+
+#define CMD_REG 0x80
+#define CMD_REG_MASK 0x3f
+#define CMD_WRITE 0x40
+#define CMD_FB 0x20
+
+#define IRQ_BAT_LOW BIT(7)
+#define IRQ_TRX_UR BIT(6)
+#define IRQ_AMI BIT(5)
+#define IRQ_CCA_ED BIT(4)
+#define IRQ_TRX_END BIT(3)
+#define IRQ_RX_START BIT(2)
+#define IRQ_PLL_UNL BIT(1)
+#define IRQ_PLL_LOCK BIT(0)
+
+#define IRQ_ACTIVE_HIGH 0
+#define IRQ_ACTIVE_LOW 1
+
+#define STATE_P_ON 0x00 /* BUSY */
+#define STATE_BUSY_RX 0x01
+#define STATE_BUSY_TX 0x02
+#define STATE_FORCE_TRX_OFF 0x03
+#define STATE_FORCE_TX_ON 0x04 /* IDLE */
+/* 0x05 */ /* INVALID_PARAMETER */
+#define STATE_RX_ON 0x06
+/* 0x07 */ /* SUCCESS */
+#define STATE_TRX_OFF 0x08
+#define STATE_TX_ON 0x09
+/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
+#define STATE_SLEEP 0x0F
+#define STATE_PREP_DEEP_SLEEP 0x10
+#define STATE_BUSY_RX_AACK 0x11
+#define STATE_BUSY_TX_ARET 0x12
+#define STATE_RX_AACK_ON 0x16
+#define STATE_TX_ARET_ON 0x19
+#define STATE_RX_ON_NOCLK 0x1C
+#define STATE_RX_AACK_ON_NOCLK 0x1D
+#define STATE_BUSY_RX_AACK_NOCLK 0x1E
+#define STATE_TRANSITION_IN_PROGRESS 0x1F
+
+#define TRX_STATE_MASK (0x1F)
+
+#endif /* !_AT86RF230_H */
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
new file mode 100644
index 000000000000..5b6bb9adf9ae
--- /dev/null
+++ b/drivers/net/ieee802154/atusb.c
@@ -0,0 +1,699 @@
+/*
+ * atusb.c - Driver for the ATUSB IEEE 802.15.4 dongle
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2
+ *
+ * Based on at86rf230.c and spi_atusb.c.
+ * at86rf230.c is
+ * Copyright (C) 2009 Siemens AG
+ * Written by: Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
+ *
+ * spi_atusb.c is
+ * Copyright (c) 2011 Richard Sharpe <realrichardsharpe@gmail.com>
+ * Copyright (c) 2011 Stefan Schmidt <stefan@datenfreihafen.org>
+ * Copyright (c) 2011 Werner Almesberger <werner@almesberger.net>
+ *
+ * USB initialization is
+ * Copyright (c) 2013 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include <net/cfg802154.h>
+#include <net/mac802154.h>
+
+#include "at86rf230.h"
+#include "atusb.h"
+
+#define ATUSB_JEDEC_ATMEL 0x1f /* JEDEC manufacturer ID */
+
+#define ATUSB_NUM_RX_URBS 4 /* allow for a bit of local latency */
+#define ATUSB_ALLOC_DELAY_MS 100 /* delay after failed allocation */
+#define ATUSB_TX_TIMEOUT_MS 200 /* on the air timeout */
+
+struct atusb {
+ struct ieee802154_hw *hw;
+ struct usb_device *usb_dev;
+ int shutdown; /* non-zero if shutting down */
+ int err; /* set by first error */
+
+ /* RX variables */
+ struct delayed_work work; /* memory allocations */
+ struct usb_anchor idle_urbs; /* URBs waiting to be submitted */
+ struct usb_anchor rx_urbs; /* URBs waiting for reception */
+
+ /* TX variables */
+ struct usb_ctrlrequest tx_dr;
+ struct urb *tx_urb;
+ struct sk_buff *tx_skb;
+ uint8_t tx_ack_seq; /* current TX ACK sequence number */
+};
+
+/* at86rf230.h defines values as <reg, mask, shift> tuples. We use the more
+ * traditional style of having registers and or-able values. SR_REG extracts
+ * the register number. SR_VALUE uses the shift to prepare a value accordingly.
+ */
+
+#define __SR_REG(reg, mask, shift) (reg)
+#define SR_REG(sr) __SR_REG(sr)
+
+#define __SR_VALUE(reg, mask, shift, val) ((val) << (shift))
+#define SR_VALUE(sr, val) __SR_VALUE(sr, (val))
+
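The indirection works because an SR_* name expands to its comma-separated <reg, mask, shift> tuple before the inner macro is invoked, so the tuple arrives as three separate parameters. A compilable illustration (SR_DEMO is a made-up field):

#include <stdio.h>

#define SR_DEMO 0x2e, 0x08, 3	/* a <reg, mask, shift> tuple */

#define __SR_REG(reg, mask, shift) (reg)
#define SR_REG(sr) __SR_REG(sr)

#define __SR_VALUE(reg, mask, shift, val) ((val) << (shift))
#define SR_VALUE(sr, val) __SR_VALUE(sr, (val))

int main(void)
{
	/* SR_REG(SR_DEMO) -> (0x2e); SR_VALUE(SR_DEMO, 1) -> (1 << 3) */
	printf("reg 0x%02x val 0x%02x\n", SR_REG(SR_DEMO), SR_VALUE(SR_DEMO, 1));
	return 0;
}
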
+/* ----- USB commands without data ----------------------------------------- */
+
+/* To reduce the number of error checks in the code, we record the first error
+ * in atusb->err and reject all subsequent requests until the error is cleared.
+ */
+
+static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
+ __u8 request, __u8 requesttype,
+ __u16 value, __u16 index,
+ void *data, __u16 size, int timeout)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+
+ if (atusb->err)
+ return atusb->err;
+
+ ret = usb_control_msg(usb_dev, pipe, request, requesttype,
+ value, index, data, size, timeout);
+ if (ret < 0) {
+ atusb->err = ret;
+ dev_err(&usb_dev->dev,
+ "atusb_control_msg: req 0x%02x val 0x%x idx 0x%x, error %d\n",
+ request, value, index, ret);
+ }
+ return ret;
+}
+
+static int atusb_command(struct atusb *atusb, uint8_t cmd, uint8_t arg)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+
+ dev_dbg(&usb_dev->dev, "atusb_command: cmd = 0x%x\n", cmd);
+ return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+ cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000);
+}
+
+static int atusb_write_reg(struct atusb *atusb, uint8_t reg, uint8_t value)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+
+ dev_dbg(&usb_dev->dev, "atusb_write_reg: 0x%02x <- 0x%02x\n",
+ reg, value);
+ return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+ ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ value, reg, NULL, 0, 1000);
+}
+
+static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+ uint8_t value;
+
+ dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
+ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, reg, &value, 1, 1000);
+ return ret >= 0 ? value : ret;
+}
+
+static int atusb_get_and_clear_error(struct atusb *atusb)
+{
+ int err = atusb->err;
+
+ atusb->err = 0;
+ return err;
+}
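
Latching the first error this way lets a burst of register writes run unchecked and be validated once at the end; every operation after a failure is a no-op that returns the saved error. The pattern, reduced to its essentials (dev_write and its failure condition are invented for the sketch):

#include <stdio.h>

struct dev {
	int err;	/* first error seen; 0 when healthy */
};

static int dev_write(struct dev *d, int reg, int val)
{
	(void)val;
	if (d->err)
		return d->err;	/* error latched: refuse further work */
	if (reg > 0x3f)
		d->err = -22;	/* pretend the transfer failed (-EINVAL) */
	return d->err;
}

static int dev_get_and_clear_error(struct dev *d)
{
	int err = d->err;

	d->err = 0;
	return err;
}

int main(void)
{
	struct dev d = { 0 };

	dev_write(&d, 0x05, 1);
	dev_write(&d, 0x99, 1);	/* fails and latches the error */
	dev_write(&d, 0x06, 1);	/* skipped because of the latch */
	printf("status %d\n", dev_get_and_clear_error(&d));
	return 0;
}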
+
+/* ----- skb allocation ---------------------------------------------------- */
+
+#define MAX_PSDU 127
+#define MAX_RX_XFER (1 + MAX_PSDU + 2 + 1) /* PHR+PSDU+CRC+LQI */
+
+#define SKB_ATUSB(skb) (*(struct atusb **)(skb)->cb)
+
+static void atusb_in(struct urb *urb);
+
+static int atusb_submit_rx_urb(struct atusb *atusb, struct urb *urb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ struct sk_buff *skb = urb->context;
+ int ret;
+
+ if (!skb) {
+ skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL);
+ if (!skb) {
+ dev_warn_ratelimited(&usb_dev->dev,
+ "atusb_in: can't allocate skb\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, MAX_RX_XFER);
+ SKB_ATUSB(skb) = atusb;
+ }
+
+ usb_fill_bulk_urb(urb, usb_dev, usb_rcvbulkpipe(usb_dev, 1),
+ skb->data, MAX_RX_XFER, atusb_in, skb);
+ usb_anchor_urb(urb, &atusb->rx_urbs);
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ kfree_skb(skb);
+ urb->context = NULL;
+ }
+ return ret;
+}
+
+static void atusb_work_urbs(struct work_struct *work)
+{
+ struct atusb *atusb =
+ container_of(to_delayed_work(work), struct atusb, work);
+ struct usb_device *usb_dev = atusb->usb_dev;
+ struct urb *urb;
+ int ret;
+
+ if (atusb->shutdown)
+ return;
+
+ do {
+ urb = usb_get_from_anchor(&atusb->idle_urbs);
+ if (!urb)
+ return;
+ ret = atusb_submit_rx_urb(atusb, urb);
+ } while (!ret);
+
+ usb_anchor_urb(urb, &atusb->idle_urbs);
+ dev_warn_ratelimited(&usb_dev->dev,
+ "atusb_in: can't allocate/submit URB (%d)\n", ret);
+ schedule_delayed_work(&atusb->work,
+ msecs_to_jiffies(ATUSB_ALLOC_DELAY_MS) + 1);
+}
+
+/* ----- Asynchronous USB -------------------------------------------------- */
+
+static void atusb_tx_done(struct atusb *atusb, uint8_t seq)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ uint8_t expect = atusb->tx_ack_seq;
+
+ dev_dbg(&usb_dev->dev, "atusb_tx_done (0x%02x/0x%02x)\n", seq, expect);
+ if (seq == expect) {
+ /* TODO check for ifs handling in firmware */
+ ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
+ } else {
+ /* TODO: This case has been seen when atusb raises a tx-complete
+ * irq before probing finishes; the firmware should be fixed.
+ * In that situation it is unlikely that seq == expect holds, but
+ * it can happen, and would then fail with tx_skb == NULL.
+ */
+ ieee802154_wake_queue(atusb->hw);
+ if (atusb->tx_skb)
+ dev_kfree_skb_irq(atusb->tx_skb);
+ }
+}
+
+static void atusb_in_good(struct urb *urb)
+{
+ struct usb_device *usb_dev = urb->dev;
+ struct sk_buff *skb = urb->context;
+ struct atusb *atusb = SKB_ATUSB(skb);
+ uint8_t len, lqi;
+
+ if (!urb->actual_length) {
dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB?\n");
+ return;
+ }
+
+ len = *skb->data;
+
+ if (urb->actual_length == 1) {
+ atusb_tx_done(atusb, len);
+ return;
+ }
+
+ if (len + 1 > urb->actual_length - 1) {
+ dev_dbg(&usb_dev->dev, "atusb_in: frame len %d+1 > URB %u-1\n",
+ len, urb->actual_length);
+ return;
+ }
+
+ if (!ieee802154_is_valid_psdu_len(len)) {
+ dev_dbg(&usb_dev->dev, "atusb_in: frame corrupted\n");
+ return;
+ }
+
+ lqi = skb->data[len + 1];
+ dev_dbg(&usb_dev->dev, "atusb_in: rx len %d lqi 0x%02x\n", len, lqi);
+ skb_pull(skb, 1); /* remove PHR */
+ skb_trim(skb, len); /* get payload only */
+ ieee802154_rx_irqsafe(atusb->hw, skb, lqi);
+ urb->context = NULL; /* skb is gone */
+}
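
Each bulk URB carries either a one-byte TX-done indication (the ACK sequence number) or a received frame laid out as PHR (one length byte), PSDU, and a trailing LQI byte. The same parsing on a plain buffer, as a sketch:

#include <stdint.h>
#include <stdio.h>

/* buf[0] = PHR (frame length), buf[1..len] = PSDU, buf[len + 1] = LQI;
 * a 1-byte transfer is a TX-done indication instead. */
static int parse_transfer(const uint8_t *buf, unsigned int actual_len)
{
	uint8_t len, lqi;

	if (!actual_len)
		return -1;			/* zero-sized URB */
	len = buf[0];
	if (actual_len == 1) {
		printf("tx done, seq 0x%02x\n", len);
		return 0;
	}
	if ((unsigned int)len + 2 > actual_len)
		return -1;			/* truncated frame */
	lqi = buf[len + 1];
	printf("rx frame, len %u lqi 0x%02x\n", len, lqi);
	return 0;
}

int main(void)
{
	uint8_t frame[] = { 3, 0xaa, 0xbb, 0xcc, 0xf0 };	/* PHR=3, LQI */

	return parse_transfer(frame, sizeof(frame));
}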
+
+static void atusb_in(struct urb *urb)
+{
+ struct usb_device *usb_dev = urb->dev;
+ struct sk_buff *skb = urb->context;
+ struct atusb *atusb = SKB_ATUSB(skb);
+
+ dev_dbg(&usb_dev->dev, "atusb_in: status %d len %d\n",
+ urb->status, urb->actual_length);
+ if (urb->status) {
+ if (urb->status == -ENOENT) { /* being killed */
+ kfree_skb(skb);
+ urb->context = NULL;
+ return;
+ }
+ dev_dbg(&usb_dev->dev, "atusb_in: URB error %d\n", urb->status);
+ } else {
+ atusb_in_good(urb);
+ }
+
+ usb_anchor_urb(urb, &atusb->idle_urbs);
+ if (!atusb->shutdown)
+ schedule_delayed_work(&atusb->work, 0);
+}
+
+/* ----- URB allocation/deallocation --------------------------------------- */
+
+static void atusb_free_urbs(struct atusb *atusb)
+{
+ struct urb *urb;
+
+ while (1) {
+ urb = usb_get_from_anchor(&atusb->idle_urbs);
+ if (!urb)
+ break;
+ if (urb->context)
+ kfree_skb(urb->context);
+ usb_free_urb(urb);
+ }
+}
+
+static int atusb_alloc_urbs(struct atusb *atusb, int n)
+{
+ struct urb *urb;
+
+ while (n) {
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ atusb_free_urbs(atusb);
+ return -ENOMEM;
+ }
+ usb_anchor_urb(urb, &atusb->idle_urbs);
+ n--;
+ }
+ return 0;
+}
+
+/* ----- IEEE 802.15.4 interface operations -------------------------------- */
+
+static void atusb_xmit_complete(struct urb *urb)
+{
+ dev_dbg(&urb->dev->dev, "atusb_xmit urb completed");
+}
+
+static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+ struct atusb *atusb = hw->priv;
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+
+ dev_dbg(&usb_dev->dev, "atusb_xmit (%d)\n", skb->len);
+ atusb->tx_skb = skb;
+ atusb->tx_ack_seq++;
+ atusb->tx_dr.wIndex = cpu_to_le16(atusb->tx_ack_seq);
+ atusb->tx_dr.wLength = cpu_to_le16(skb->len);
+
+ usb_fill_control_urb(atusb->tx_urb, usb_dev,
+ usb_sndctrlpipe(usb_dev, 0),
+ (unsigned char *)&atusb->tx_dr, skb->data,
+ skb->len, atusb_xmit_complete, NULL);
+ ret = usb_submit_urb(atusb->tx_urb, GFP_ATOMIC);
+ dev_dbg(&usb_dev->dev, "atusb_xmit done (%d)\n", ret);
+ return ret;
+}
+
+static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+ struct atusb *atusb = hw->priv;
+ int ret;
+
+ /* This implicitly sets the CCA (Clear Channel Assessment) mode to 0,
+ * "Mode 3a, Carrier sense OR energy above threshold".
+ * We should probably make this configurable. @@@
+ */
+ ret = atusb_write_reg(atusb, RG_PHY_CC_CCA, channel);
+ if (ret < 0)
+ return ret;
+ msleep(1); /* @@@ ugly synchronization */
+ return 0;
+}
+
+static int atusb_ed(struct ieee802154_hw *hw, u8 *level)
+{
+ BUG_ON(!level);
+ *level = 0xbe;
+ return 0;
+}
+
+static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
+ struct ieee802154_hw_addr_filt *filt,
+ unsigned long changed)
+{
+ struct atusb *atusb = hw->priv;
+ struct device *dev = &atusb->usb_dev->dev;
+ uint8_t reg;
+
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
+ u16 addr = le16_to_cpu(filt->short_addr);
+
+ dev_vdbg(dev, "atusb_set_hw_addr_filt called for saddr\n");
+ atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr);
+ atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8);
+ }
+
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
+ u16 pan = le16_to_cpu(filt->pan_id);
+
+ dev_vdbg(dev, "atusb_set_hw_addr_filt called for pan id\n");
+ atusb_write_reg(atusb, RG_PAN_ID_0, pan);
+ atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8);
+ }
+
+ if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
+ u8 i, addr[IEEE802154_EXTENDED_ADDR_LEN];
+
+ memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN);
+ dev_vdbg(dev, "atusb_set_hw_addr_filt called for IEEE addr\n");
+ for (i = 0; i < 8; i++)
+ atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]);
+ }
+
+ if (changed & IEEE802154_AFILT_PANC_CHANGED) {
+ dev_vdbg(dev,
+ "atusb_set_hw_addr_filt called for panc change\n");
+ reg = atusb_read_reg(atusb, SR_REG(SR_AACK_I_AM_COORD));
+ if (filt->pan_coord)
+ reg |= SR_VALUE(SR_AACK_I_AM_COORD, 1);
+ else
+ reg &= ~SR_VALUE(SR_AACK_I_AM_COORD, 1);
+ atusb_write_reg(atusb, SR_REG(SR_AACK_I_AM_COORD), reg);
+ }
+
+ return atusb_get_and_clear_error(atusb);
+}
+
+static int atusb_start(struct ieee802154_hw *hw)
+{
+ struct atusb *atusb = hw->priv;
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+
+ dev_dbg(&usb_dev->dev, "atusb_start\n");
+ schedule_delayed_work(&atusb->work, 0);
+ atusb_command(atusb, ATUSB_RX_MODE, 1);
+ ret = atusb_get_and_clear_error(atusb);
+ if (ret < 0)
+ usb_kill_anchored_urbs(&atusb->idle_urbs);
+ return ret;
+}
+
+static void atusb_stop(struct ieee802154_hw *hw)
+{
+ struct atusb *atusb = hw->priv;
+ struct usb_device *usb_dev = atusb->usb_dev;
+
+ dev_dbg(&usb_dev->dev, "atusb_stop\n");
+ usb_kill_anchored_urbs(&atusb->idle_urbs);
+ atusb_command(atusb, ATUSB_RX_MODE, 0);
+ atusb_get_and_clear_error(atusb);
+}
+
+static struct ieee802154_ops atusb_ops = {
+ .owner = THIS_MODULE,
+ .xmit_async = atusb_xmit,
+ .ed = atusb_ed,
+ .set_channel = atusb_channel,
+ .start = atusb_start,
+ .stop = atusb_stop,
+ .set_hw_addr_filt = atusb_set_hw_addr_filt,
+};
+
+/* ----- Firmware and chip version information ----------------------------- */
+
+static int atusb_get_and_show_revision(struct atusb *atusb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ unsigned char buffer[3];
+ int ret;
+
+ /* Get a couple of the ATMega Firmware values */
+ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
+ buffer, 3, 1000);
+ if (ret >= 0) {
+ dev_info(&usb_dev->dev,
+ "Firmware: major: %u, minor: %u, hardware type: %u\n",
+ buffer[0], buffer[1], buffer[2]);
+ if (buffer[0] == 0 && buffer[1] < 2) {
+ dev_info(&usb_dev->dev,
+ "Firmware version (%u.%u) predates our first public release.\n",
+ buffer[0], buffer[1]);
+ dev_info(&usb_dev->dev, "Please update to version 0.2 or newer\n");
+ }
+ }
+
+ return ret;
+}
+
+static int atusb_get_and_show_build(struct atusb *atusb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ char build[ATUSB_BUILD_SIZE + 1];
+ int ret;
+
+ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
+ build, ATUSB_BUILD_SIZE, 1000);
+ if (ret >= 0) {
+ build[ret] = 0;
+ dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
+ }
+
+ return ret;
+}
+
+static int atusb_get_and_show_chip(struct atusb *atusb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ uint8_t man_id_0, man_id_1, part_num, version_num;
+
+ man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
+ man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
+ part_num = atusb_read_reg(atusb, RG_PART_NUM);
+ version_num = atusb_read_reg(atusb, RG_VERSION_NUM);
+
+ if (atusb->err)
+ return atusb->err;
+
+ if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) {
+ dev_err(&usb_dev->dev,
+ "non-Atmel transceiver xxxx%02x%02x\n",
+ man_id_1, man_id_0);
+ goto fail;
+ }
+ if (part_num != 3 && part_num != 2) {
+ dev_err(&usb_dev->dev,
+ "unexpected transceiver, part 0x%02x version 0x%02x\n",
+ part_num, version_num);
+ goto fail;
+ }
+
+ dev_info(&usb_dev->dev, "ATUSB: AT86RF231 version %d\n", version_num);
+
+ return 0;
+
+fail:
+ atusb->err = -ENODEV;
+ return -ENODEV;
+}
+
+/* ----- Setup ------------------------------------------------------------- */
+
+static int atusb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(interface);
+ struct ieee802154_hw *hw;
+ struct atusb *atusb = NULL;
+ int ret = -ENOMEM;
+
+ hw = ieee802154_alloc_hw(sizeof(struct atusb), &atusb_ops);
+ if (!hw)
+ return -ENOMEM;
+
+ atusb = hw->priv;
+ atusb->hw = hw;
+ atusb->usb_dev = usb_get_dev(usb_dev);
+ usb_set_intfdata(interface, atusb);
+
+ atusb->shutdown = 0;
+ atusb->err = 0;
+ INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs);
+ init_usb_anchor(&atusb->idle_urbs);
+ init_usb_anchor(&atusb->rx_urbs);
+
+ if (atusb_alloc_urbs(atusb, ATUSB_NUM_RX_URBS))
+ goto fail;
+
+ atusb->tx_dr.bRequestType = ATUSB_REQ_TO_DEV;
+ atusb->tx_dr.bRequest = ATUSB_TX;
+ atusb->tx_dr.wValue = cpu_to_le16(0);
+
+ atusb->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!atusb->tx_urb)
+ goto fail;
+
+ hw->parent = &usb_dev->dev;
+ hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
+ IEEE802154_HW_AACK;
+
+ hw->phy->current_page = 0;
+ hw->phy->current_channel = 11; /* reset default */
+ hw->phy->supported.channels[0] = 0x7FFF800;
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+
+ atusb_command(atusb, ATUSB_RF_RESET, 0);
+ atusb_get_and_show_chip(atusb);
+ atusb_get_and_show_revision(atusb);
+ atusb_get_and_show_build(atusb);
+ ret = atusb_get_and_clear_error(atusb);
+ if (ret) {
+ dev_err(&atusb->usb_dev->dev,
+ "%s: initialization failed, error = %d\n",
+ __func__, ret);
+ goto fail;
+ }
+
+ ret = ieee802154_register_hw(hw);
+ if (ret)
+ goto fail;
+
+ /* If we just powered on, we're now in P_ON and need to enter TRX_OFF
+ * explicitly. Any resets after that will send us straight to TRX_OFF,
+ * making the command below redundant.
+ */
+ atusb_write_reg(atusb, RG_TRX_STATE, STATE_FORCE_TRX_OFF);
+ msleep(1); /* reset => TRX_OFF, tTR13 = 37 us */
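Rather than the fixed one-millisecond sleep, the state transition could in principle be verified by polling. A hedged sketch, assuming RG_TRX_STATUS and STATE_TRX_OFF from the shared at86rf230 register definitions; the retry bound is an arbitrary assumption:

static int example_wait_trx_off(struct atusb *atusb)
{
	int tries = 10;

	while (tries--) {
		/* TRX_STATUS lives in the low five bits of the register */
		u8 status = atusb_read_reg(atusb, RG_TRX_STATUS) & 0x1f;

		if (status == STATE_TRX_OFF)
			return 0;
		usleep_range(40, 100);	/* tTR13 is 37 us */
	}
	return -ETIMEDOUT;
}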
+
+#if 0
+ /* Calculating the maximum time available to empty the frame buffer
+ * on reception:
+ *
+ * According to [1], the inter-frame gap is
+ * R * 20 * 16 us + 128 us
+ * where R is a random number from 0 to 7. Furthermore, we have 20 bit
+ * times (80 us at 250 kbps) of SHR of the next frame before the
+ * transceiver begins storing data in the frame buffer.
+ *
+ * This yields a minimum time of 208 us between the last data of a
+ * frame and the first data of the next frame. This time is further
+ * reduced by interrupt latency in the atusb firmware.
+ *
+ * atusb currently needs about 500 us to retrieve a maximum-sized
+ * frame. We therefore have to allow reception of a new frame to begin
+ * while we retrieve the previous frame.
+ *
+ * [1] "JN-AN-1035 Calculating data rates in an IEEE 802.15.4-based
+ * network", Jennic 2006.
+ * http://www.jennic.com/download_file.php?supportFile=JN-AN-1035%20Calculating%20802-15-4%20Data%20Rates-1v0.pdf
+ */
+
+ atusb_write_reg(atusb,
+ SR_REG(SR_RX_SAFE_MODE), SR_VALUE(SR_RX_SAFE_MODE, 1));
+#endif
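For reference, the 208 us minimum quoted in the disabled block above follows directly from its own numbers: with R = 0 the inter-frame gap is 0 * 20 * 16 us + 128 us = 128 us, and at 250 kbps one bit lasts 4 us, so the 20 bit times of SHR add 20 * 4 us = 80 us, giving 128 + 80 = 208 us before the next frame starts filling the buffer.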
+ atusb_write_reg(atusb, RG_IRQ_MASK, 0xff);
+
+ ret = atusb_get_and_clear_error(atusb);
+ if (!ret)
+ return 0;
+
+ dev_err(&atusb->usb_dev->dev,
+ "%s: setup failed, error = %d\n",
+ __func__, ret);
+
+ ieee802154_unregister_hw(hw);
+fail:
+ atusb_free_urbs(atusb);
+ usb_kill_urb(atusb->tx_urb);
+ usb_free_urb(atusb->tx_urb);
+ usb_put_dev(usb_dev);
+ ieee802154_free_hw(hw);
+ return ret;
+}
+
+static void atusb_disconnect(struct usb_interface *interface)
+{
+ struct atusb *atusb = usb_get_intfdata(interface);
+
+ dev_dbg(&atusb->usb_dev->dev, "atusb_disconnect\n");
+
+ atusb->shutdown = 1;
+ cancel_delayed_work_sync(&atusb->work);
+
+ usb_kill_anchored_urbs(&atusb->rx_urbs);
+ atusb_free_urbs(atusb);
+ usb_kill_urb(atusb->tx_urb);
+ usb_free_urb(atusb->tx_urb);
+
+ ieee802154_unregister_hw(atusb->hw);
+
+ ieee802154_free_hw(atusb->hw);
+
+ usb_set_intfdata(interface, NULL);
+ usb_put_dev(atusb->usb_dev);
+
+ pr_debug("atusb_disconnect done\n");
+}
+
+/* The devices we work with */
+static const struct usb_device_id atusb_device_table[] = {
+ {
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = ATUSB_VENDOR_ID,
+ .idProduct = ATUSB_PRODUCT_ID,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC
+ },
+ /* end with null element */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, atusb_device_table);
+
+static struct usb_driver atusb_driver = {
+ .name = "atusb",
+ .probe = atusb_probe,
+ .disconnect = atusb_disconnect,
+ .id_table = atusb_device_table,
+};
+module_usb_driver(atusb_driver);
+
+MODULE_AUTHOR("Alexander Aring <alex.aring@gmail.com>");
+MODULE_AUTHOR("Richard Sharpe <realrichardsharpe@gmail.com>");
+MODULE_AUTHOR("Stefan Schmidt <stefan@datenfreihafen.org>");
+MODULE_AUTHOR("Werner Almesberger <werner@almesberger.net>");
+MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/atusb.h b/drivers/net/ieee802154/atusb.h
new file mode 100644
index 000000000000..0690edcad57b
--- /dev/null
+++ b/drivers/net/ieee802154/atusb.h
@@ -0,0 +1,84 @@
+/*
+ * atusb.h - Definitions shared between kernel and ATUSB firmware
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2, or
+ * (at your option) any later version.
+ *
+ * This file should be identical for kernel and firmware.
+ * Kernel: drivers/net/ieee802154/atusb.h
+ * Firmware: ben-wpan/atusb/fw/include/atusb/atusb.h
+ */
+
+#ifndef _ATUSB_H
+#define _ATUSB_H
+
+#define ATUSB_VENDOR_ID 0x20b7 /* Qi Hardware */
+#define ATUSB_PRODUCT_ID 0x1540 /* 802.15.4, device 0 */
+ /* -- - - */
+
+#define ATUSB_BUILD_SIZE 256 /* maximum build version/date message length */
+
+/* Commands to our device. Make sure this is synced with the firmware */
+enum atusb_requests {
+ ATUSB_ID = 0x00, /* system status/control grp */
+ ATUSB_BUILD,
+ ATUSB_RESET,
+ ATUSB_RF_RESET = 0x10, /* debug/test group */
+ ATUSB_POLL_INT,
+ ATUSB_TEST, /* atusb-sil only */
+ ATUSB_TIMER,
+ ATUSB_GPIO,
+ ATUSB_SLP_TR,
+ ATUSB_GPIO_CLEANUP,
+ ATUSB_REG_WRITE = 0x20, /* transceiver group */
+ ATUSB_REG_READ,
+ ATUSB_BUF_WRITE,
+ ATUSB_BUF_READ,
+ ATUSB_SRAM_WRITE,
+ ATUSB_SRAM_READ,
+ ATUSB_SPI_WRITE = 0x30, /* SPI group */
+ ATUSB_SPI_READ1,
+ ATUSB_SPI_READ2,
+ ATUSB_SPI_WRITE2_SYNC,
+ ATUSB_RX_MODE = 0x40, /* HardMAC group */
+ ATUSB_TX,
+};
+
+/* Direction bRequest wValue wIndex wLength
+ *
+ * ->host ATUSB_ID - - 3
+ * ->host ATUSB_BUILD - - #bytes
+ * host-> ATUSB_RESET - - 0
+ *
+ * host-> ATUSB_RF_RESET - - 0
+ * ->host ATUSB_POLL_INT - - 1
+ * host-> ATUSB_TEST - - 0
+ * ->host ATUSB_TIMER - - #bytes (6)
+ * ->host ATUSB_GPIO dir+data mask+p# 3
+ * host-> ATUSB_SLP_TR - - 0
+ * host-> ATUSB_GPIO_CLEANUP - - 0
+ *
+ * host-> ATUSB_REG_WRITE value addr 0
+ * ->host ATUSB_REG_READ - addr 1
+ * host-> ATUSB_BUF_WRITE - - #bytes
+ * ->host ATUSB_BUF_READ - - #bytes
+ * host-> ATUSB_SRAM_WRITE - addr #bytes
+ * ->host ATUSB_SRAM_READ - addr #bytes
+ *
+ * host-> ATUSB_SPI_WRITE byte0 byte1 #bytes
+ * ->host ATUSB_SPI_READ1 byte0 - #bytes
+ * ->host ATUSB_SPI_READ2 byte0 byte1 #bytes
+ * ->host ATUSB_SPI_WRITE2_SYNC byte0 byte1 0/1
+ *
+ * host-> ATUSB_RX_MODE on - 0
+ * host-> ATUSB_TX flags ack_seq #bytes
+ */
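To illustrate how this table maps onto the USB API, the sketch below issues ATUSB_REG_READ: the register address goes in wIndex and a single byte comes back. This is not part of the patch; the helper name is hypothetical, the buffer is heap-allocated since control transfers need DMA-capable memory, and error handling is condensed.

static int example_reg_read(struct usb_device *usb_dev, u8 addr)
{
	u8 *value;
	int ret;

	value = kmalloc(1, GFP_KERNEL);
	if (!value)
		return -ENOMEM;
	/* ->host ATUSB_REG_READ: wValue = 0, wIndex = addr, wLength = 1 */
	ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			      ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
			      0, addr, value, 1, 1000);
	if (ret >= 0)
		ret = *value;
	kfree(value);
	return ret;
}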
+
+#define ATUSB_REQ_FROM_DEV (USB_TYPE_VENDOR | USB_DIR_IN)
+#define ATUSB_REQ_TO_DEV (USB_TYPE_VENDOR | USB_DIR_OUT)
+
+#endif /* !_ATUSB_H */
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index f833b8bb6663..84b28a05c5a1 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -653,7 +653,7 @@ static int cc2520_register(struct cc2520_private *priv)
ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr);
/* We do support only 2.4 Ghz */
- priv->hw->phy->channels_supported[0] = 0x7FFF800;
+ priv->hw->phy->supported.channels[0] = 0x7FFF800;
priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
IEEE802154_HW_AFILT;
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index dc2bfb600b4b..9d0da4ec3e8c 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -27,25 +27,25 @@
#include <net/mac802154.h>
#include <net/cfg802154.h>
-static int numlbs = 1;
+static int numlbs = 2;
-struct fakelb_dev_priv {
- struct ieee802154_hw *hw;
+static LIST_HEAD(fakelb_phys);
+static DEFINE_SPINLOCK(fakelb_phys_lock);
- struct list_head list;
- struct fakelb_priv *fake;
+static LIST_HEAD(fakelb_ifup_phys);
+static DEFINE_RWLOCK(fakelb_ifup_phys_lock);
- spinlock_t lock;
- bool working;
-};
+struct fakelb_phy {
+ struct ieee802154_hw *hw;
+
+ u8 page;
+ u8 channel;
-struct fakelb_priv {
struct list_head list;
- rwlock_t lock;
+ struct list_head list_ifup;
};
-static int
-fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
+static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
{
BUG_ON(!level);
*level = 0xbe;
@@ -53,78 +53,63 @@ fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
return 0;
}
-static int
-fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+static int fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
- pr_debug("set channel to %d\n", channel);
+ struct fakelb_phy *phy = hw->priv;
+ write_lock_bh(&fakelb_ifup_phys_lock);
+ phy->page = page;
+ phy->channel = channel;
+ write_unlock_bh(&fakelb_ifup_phys_lock);
return 0;
}
-static void
-fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
+static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
- struct sk_buff *newskb;
+ struct fakelb_phy *current_phy = hw->priv, *phy;
- spin_lock(&priv->lock);
- if (priv->working) {
- newskb = pskb_copy(skb, GFP_ATOMIC);
- ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc);
- }
- spin_unlock(&priv->lock);
-}
+ read_lock_bh(&fakelb_ifup_phys_lock);
+ list_for_each_entry(phy, &fakelb_ifup_phys, list_ifup) {
+ if (current_phy == phy)
+ continue;
-static int
-fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
-{
- struct fakelb_dev_priv *priv = hw->priv;
- struct fakelb_priv *fake = priv->fake;
-
- read_lock_bh(&fake->lock);
- if (priv->list.next == priv->list.prev) {
- /* we are the only one device */
- fakelb_hw_deliver(priv, skb);
- } else {
- struct fakelb_dev_priv *dp;
- list_for_each_entry(dp, &priv->fake->list, list) {
- if (dp != priv &&
- (dp->hw->phy->current_channel ==
- priv->hw->phy->current_channel))
- fakelb_hw_deliver(dp, skb);
+ if (current_phy->page == phy->page &&
+ current_phy->channel == phy->channel) {
+ struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC);
+
+ if (newskb)
+ ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc);
}
}
- read_unlock_bh(&fake->lock);
+ read_unlock_bh(&fakelb_ifup_phys_lock);
+ ieee802154_xmit_complete(hw, skb, false);
return 0;
}
-static int
-fakelb_hw_start(struct ieee802154_hw *hw) {
- struct fakelb_dev_priv *priv = hw->priv;
- int ret = 0;
+static int fakelb_hw_start(struct ieee802154_hw *hw)
+{
+ struct fakelb_phy *phy = hw->priv;
- spin_lock(&priv->lock);
- if (priv->working)
- ret = -EBUSY;
- else
- priv->working = 1;
- spin_unlock(&priv->lock);
+ write_lock_bh(&fakelb_ifup_phys_lock);
+ list_add(&phy->list_ifup, &fakelb_ifup_phys);
+ write_unlock_bh(&fakelb_ifup_phys_lock);
- return ret;
+ return 0;
}
-static void
-fakelb_hw_stop(struct ieee802154_hw *hw) {
- struct fakelb_dev_priv *priv = hw->priv;
+static void fakelb_hw_stop(struct ieee802154_hw *hw)
+{
+ struct fakelb_phy *phy = hw->priv;
- spin_lock(&priv->lock);
- priv->working = 0;
- spin_unlock(&priv->lock);
+ write_lock_bh(&fakelb_ifup_phys_lock);
+ list_del(&phy->list_ifup);
+ write_unlock_bh(&fakelb_ifup_phys_lock);
}
static const struct ieee802154_ops fakelb_ops = {
.owner = THIS_MODULE,
- .xmit_sync = fakelb_hw_xmit,
+ .xmit_async = fakelb_hw_xmit,
.ed = fakelb_hw_ed,
.set_channel = fakelb_hw_channel,
.start = fakelb_hw_start,
@@ -135,54 +120,54 @@ static const struct ieee802154_ops fakelb_ops = {
module_param(numlbs, int, 0);
MODULE_PARM_DESC(numlbs, " number of pseudo devices");
-static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
+static int fakelb_add_one(struct device *dev)
{
- struct fakelb_dev_priv *priv;
- int err;
struct ieee802154_hw *hw;
+ struct fakelb_phy *phy;
+ int err;
- hw = ieee802154_alloc_hw(sizeof(*priv), &fakelb_ops);
+ hw = ieee802154_alloc_hw(sizeof(*phy), &fakelb_ops);
if (!hw)
return -ENOMEM;
- priv = hw->priv;
- priv->hw = hw;
+ phy = hw->priv;
+ phy->hw = hw;
/* 868 MHz BPSK 802.15.4-2003 */
- hw->phy->channels_supported[0] |= 1;
+ hw->phy->supported.channels[0] |= 1;
/* 915 MHz BPSK 802.15.4-2003 */
- hw->phy->channels_supported[0] |= 0x7fe;
+ hw->phy->supported.channels[0] |= 0x7fe;
/* 2.4 GHz O-QPSK 802.15.4-2003 */
- hw->phy->channels_supported[0] |= 0x7FFF800;
+ hw->phy->supported.channels[0] |= 0x7FFF800;
/* 868 MHz ASK 802.15.4-2006 */
- hw->phy->channels_supported[1] |= 1;
+ hw->phy->supported.channels[1] |= 1;
/* 915 MHz ASK 802.15.4-2006 */
- hw->phy->channels_supported[1] |= 0x7fe;
+ hw->phy->supported.channels[1] |= 0x7fe;
/* 868 MHz O-QPSK 802.15.4-2006 */
- hw->phy->channels_supported[2] |= 1;
+ hw->phy->supported.channels[2] |= 1;
/* 915 MHz O-QPSK 802.15.4-2006 */
- hw->phy->channels_supported[2] |= 0x7fe;
+ hw->phy->supported.channels[2] |= 0x7fe;
/* 2.4 GHz CSS 802.15.4a-2007 */
- hw->phy->channels_supported[3] |= 0x3fff;
+ hw->phy->supported.channels[3] |= 0x3fff;
/* UWB Sub-gigahertz 802.15.4a-2007 */
- hw->phy->channels_supported[4] |= 1;
+ hw->phy->supported.channels[4] |= 1;
/* UWB Low band 802.15.4a-2007 */
- hw->phy->channels_supported[4] |= 0x1e;
+ hw->phy->supported.channels[4] |= 0x1e;
/* UWB High band 802.15.4a-2007 */
- hw->phy->channels_supported[4] |= 0xffe0;
+ hw->phy->supported.channels[4] |= 0xffe0;
/* 750 MHz O-QPSK 802.15.4c-2009 */
- hw->phy->channels_supported[5] |= 0xf;
+ hw->phy->supported.channels[5] |= 0xf;
/* 750 MHz MPSK 802.15.4c-2009 */
- hw->phy->channels_supported[5] |= 0xf0;
+ hw->phy->supported.channels[5] |= 0xf0;
/* 950 MHz BPSK 802.15.4d-2009 */
- hw->phy->channels_supported[6] |= 0x3ff;
+ hw->phy->supported.channels[6] |= 0x3ff;
/* 950 MHz GFSK 802.15.4d-2009 */
- hw->phy->channels_supported[6] |= 0x3ffc00;
+ hw->phy->supported.channels[6] |= 0x3ffc00;
- INIT_LIST_HEAD(&priv->list);
- priv->fake = fake;
-
- spin_lock_init(&priv->lock);
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+ /* fake phy channel 13 as default */
+ hw->phy->current_channel = 13;
+ phy->channel = hw->phy->current_channel;
hw->parent = dev;
@@ -190,67 +175,55 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
if (err)
goto err_reg;
- write_lock_bh(&fake->lock);
- list_add_tail(&priv->list, &fake->list);
- write_unlock_bh(&fake->lock);
+ spin_lock(&fakelb_phys_lock);
+ list_add_tail(&phy->list, &fakelb_phys);
+ spin_unlock(&fakelb_phys_lock);
return 0;
err_reg:
- ieee802154_free_hw(priv->hw);
+ ieee802154_free_hw(phy->hw);
return err;
}
-static void fakelb_del(struct fakelb_dev_priv *priv)
+static void fakelb_del(struct fakelb_phy *phy)
{
- write_lock_bh(&priv->fake->lock);
- list_del(&priv->list);
- write_unlock_bh(&priv->fake->lock);
+ list_del(&phy->list);
- ieee802154_unregister_hw(priv->hw);
- ieee802154_free_hw(priv->hw);
+ ieee802154_unregister_hw(phy->hw);
+ ieee802154_free_hw(phy->hw);
}
static int fakelb_probe(struct platform_device *pdev)
{
- struct fakelb_priv *priv;
- struct fakelb_dev_priv *dp;
- int err = -ENOMEM;
- int i;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
- GFP_KERNEL);
- if (!priv)
- goto err_alloc;
-
- INIT_LIST_HEAD(&priv->list);
- rwlock_init(&priv->lock);
+ struct fakelb_phy *phy, *tmp;
+ int err, i;
for (i = 0; i < numlbs; i++) {
- err = fakelb_add_one(&pdev->dev, priv);
+ err = fakelb_add_one(&pdev->dev);
if (err < 0)
goto err_slave;
}
- platform_set_drvdata(pdev, priv);
dev_info(&pdev->dev, "added ieee802154 hardware\n");
return 0;
err_slave:
- list_for_each_entry(dp, &priv->list, list)
- fakelb_del(dp);
-err_alloc:
+ spin_lock(&fakelb_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+ fakelb_del(phy);
+ spin_unlock(&fakelb_phys_lock);
return err;
}
static int fakelb_remove(struct platform_device *pdev)
{
- struct fakelb_priv *priv = platform_get_drvdata(pdev);
- struct fakelb_dev_priv *dp, *temp;
-
- list_for_each_entry_safe(dp, temp, &priv->list, list)
- fakelb_del(dp);
+ struct fakelb_phy *phy, *tmp;
+ spin_lock(&fakelb_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+ fakelb_del(phy);
+ spin_unlock(&fakelb_phys_lock);
return 0;
}
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index fba2dfd910f7..f2a1bd122a74 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -750,7 +750,7 @@ static int mrf24j40_probe(struct spi_device *spi)
devrec->hw->priv = devrec;
devrec->hw->parent = &devrec->spi->dev;
- devrec->hw->phy->channels_supported[0] = CHANNEL_MASK;
+ devrec->hw->phy->supported.channels[0] = CHANNEL_MASK;
devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
IEEE802154_HW_AFILT;
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 54549a6223dd..953a97492fab 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -39,6 +39,8 @@
#define IPVLAN_MAC_FILTER_SIZE (1 << IPVLAN_MAC_FILTER_BITS)
#define IPVLAN_MAC_FILTER_MASK (IPVLAN_MAC_FILTER_SIZE - 1)
+#define IPVLAN_QBACKLOG_LIMIT 1000
+
typedef enum {
IPVL_IPV6 = 0,
IPVL_ICMPV6,
@@ -93,6 +95,8 @@ struct ipvl_port {
struct hlist_head hlhead[IPVLAN_HASH_SIZE];
struct list_head ipvlans;
struct rcu_head rcu;
+ struct work_struct wq;
+ struct sk_buff_head backlog;
int count;
u16 mode;
};
@@ -112,6 +116,7 @@ void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval);
void ipvlan_init_secret(void);
unsigned int ipvlan_mac_hash(const unsigned char *addr);
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
+void ipvlan_process_multicast(struct work_struct *work);
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index c30b5c300c05..8afbedad620d 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -189,62 +189,69 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr)
return hash & IPVLAN_MAC_FILTER_MASK;
}
-static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
- const struct ipvl_dev *in_dev, bool local)
+void ipvlan_process_multicast(struct work_struct *work)
{
- struct ethhdr *eth = eth_hdr(skb);
+ struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
+ struct ethhdr *ethh;
struct ipvl_dev *ipvlan;
- struct sk_buff *nskb;
+ struct sk_buff *skb, *nskb;
+ struct sk_buff_head list;
unsigned int len;
unsigned int mac_hash;
int ret;
+ u8 pkt_type;
+ bool hlocal, dlocal;
- if (skb->protocol == htons(ETH_P_PAUSE))
- return;
-
- rcu_read_lock();
- list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
- if (local && (ipvlan == in_dev))
- continue;
+ __skb_queue_head_init(&list);
- mac_hash = ipvlan_mac_hash(eth->h_dest);
- if (!test_bit(mac_hash, ipvlan->mac_filters))
- continue;
+ spin_lock_bh(&port->backlog.lock);
+ skb_queue_splice_tail_init(&port->backlog, &list);
+ spin_unlock_bh(&port->backlog.lock);
- ret = NET_RX_DROP;
- len = skb->len + ETH_HLEN;
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- goto mcast_acct;
+ while ((skb = __skb_dequeue(&list)) != NULL) {
+ ethh = eth_hdr(skb);
+ hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
+ mac_hash = ipvlan_mac_hash(ethh->h_dest);
- if (ether_addr_equal(eth->h_dest, ipvlan->phy_dev->broadcast))
- nskb->pkt_type = PACKET_BROADCAST;
+ if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
+ pkt_type = PACKET_BROADCAST;
else
- nskb->pkt_type = PACKET_MULTICAST;
-
- nskb->dev = ipvlan->dev;
- if (local)
- ret = dev_forward_skb(ipvlan->dev, nskb);
- else
- ret = netif_rx(nskb);
-mcast_acct:
- ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
- }
- rcu_read_unlock();
-
- /* Locally generated? ...Forward a copy to the main-device as
- * well. On the RX side we'll ignore it (wont give it to any
- * of the virtual devices.
- */
- if (local) {
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (nskb) {
- if (ether_addr_equal(eth->h_dest, port->dev->broadcast))
- nskb->pkt_type = PACKET_BROADCAST;
+ pkt_type = PACKET_MULTICAST;
+
+ dlocal = false;
+ rcu_read_lock();
+ list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
+ if (hlocal && (ipvlan->dev == skb->dev)) {
+ dlocal = true;
+ continue;
+ }
+ if (!test_bit(mac_hash, ipvlan->mac_filters))
+ continue;
+
+ ret = NET_RX_DROP;
+ len = skb->len + ETH_HLEN;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ goto acct;
+
+ nskb->pkt_type = pkt_type;
+ nskb->dev = ipvlan->dev;
+ if (hlocal)
+ ret = dev_forward_skb(ipvlan->dev, nskb);
else
- nskb->pkt_type = PACKET_MULTICAST;
-
- dev_forward_skb(port->dev, nskb);
+ ret = netif_rx(nskb);
+acct:
+ ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+ }
+ rcu_read_unlock();
+
+ if (dlocal) {
+ /* If the packet originated here, send it out. */
+ skb->dev = port->dev;
+ skb->pkt_type = pkt_type;
+ dev_queue_xmit(skb);
+ } else {
+ kfree_skb(skb);
}
}
}
@@ -446,6 +453,26 @@ out:
return ret;
}
+static void ipvlan_multicast_enqueue(struct ipvl_port *port,
+ struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_PAUSE)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ spin_lock(&port->backlog.lock);
+ if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
+ __skb_queue_tail(&port->backlog, skb);
+ spin_unlock(&port->backlog.lock);
+ schedule_work(&port->wq);
+ } else {
+ spin_unlock(&port->backlog.lock);
+ atomic_long_inc(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+ }
+}
+
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -493,11 +520,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return dev_forward_skb(ipvlan->phy_dev, skb);
} else if (is_multicast_ether_addr(eth->h_dest)) {
- u8 ip_summed = skb->ip_summed;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- ipvlan_multicast_frame(ipvlan->port, skb, ipvlan, true);
- skb->ip_summed = ip_summed;
+ ipvlan_multicast_enqueue(ipvlan->port, skb);
+ return NET_XMIT_SUCCESS;
}
skb->dev = ipvlan->phy_dev;
@@ -581,8 +605,18 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
int addr_type;
if (is_multicast_ether_addr(eth->h_dest)) {
- if (ipvlan_external_frame(skb, port))
- ipvlan_multicast_frame(port, skb, NULL, false);
+ if (ipvlan_external_frame(skb, port)) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ /* External frames are queued for device-local
+ * distribution, but a copy is handed to the master
+ * straight away to avoid sending duplicates later
+ * when the work-queue processes this frame. This is
+ * achieved by returning RX_HANDLER_PASS.
+ */
+ if (nskb)
+ ipvlan_multicast_enqueue(port, nskb);
+ }
} else {
struct ipvl_addr *addr;
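A condensed sketch of the contract described in the comment above (not the patch itself; the function name is hypothetical): clone multicast frames for deferred work-queue delivery and return RX_HANDLER_PASS so the master device still sees the original.

static rx_handler_result_t example_handle_multicast(struct ipvl_port *port,
						    struct sk_buff *skb)
{
	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

	if (nskb)
		ipvlan_multicast_enqueue(port, nskb);
	/* The original skb continues on to the master device */
	return RX_HANDLER_PASS;
}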
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 77b92a0fe557..1acc283160d9 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -54,6 +54,9 @@ static int ipvlan_port_create(struct net_device *dev)
for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
INIT_HLIST_HEAD(&port->hlhead[idx]);
+ skb_queue_head_init(&port->backlog);
+ INIT_WORK(&port->wq, ipvlan_process_multicast);
+
err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
if (err)
goto err;
@@ -72,6 +75,8 @@ static void ipvlan_port_destroy(struct net_device *dev)
dev->priv_flags &= ~IFF_IPVLAN_MASTER;
netdev_rx_handler_unregister(dev);
+ cancel_work_sync(&port->wq);
+ __skb_queue_purge(&port->backlog);
kfree_rcu(port, rcu);
}
@@ -213,17 +218,6 @@ static void ipvlan_change_rx_flags(struct net_device *dev, int change)
dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI? 1 : -1);
}
-static void ipvlan_set_broadcast_mac_filter(struct ipvl_dev *ipvlan, bool set)
-{
- struct net_device *dev = ipvlan->dev;
- unsigned int hashbit = ipvlan_mac_hash(dev->broadcast);
-
- if (set && !test_bit(hashbit, ipvlan->mac_filters))
- __set_bit(hashbit, ipvlan->mac_filters);
- else if (!set && test_bit(hashbit, ipvlan->mac_filters))
- __clear_bit(hashbit, ipvlan->mac_filters);
-}
-
static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -238,6 +232,12 @@ static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev)
__set_bit(ipvlan_mac_hash(ha->addr), mc_filters);
+ /* Turn on the broadcast bit irrespective of address family,
+ * since broadcast handling is deferred to a work-queue and
+ * hence has no impact on fast-path processing.
+ */
+ __set_bit(ipvlan_mac_hash(dev->broadcast), mc_filters);
+
bitmap_copy(ipvlan->mac_filters, mc_filters,
IPVLAN_MAC_FILTER_SIZE);
}
@@ -705,7 +705,6 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
*/
if (netif_running(ipvlan->dev))
ipvlan_ht_addr_add(ipvlan, addr);
- ipvlan_set_broadcast_mac_filter(ipvlan, true);
return 0;
}
@@ -722,8 +721,6 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
list_del(&addr->anode);
ipvlan->ipv4cnt--;
WARN_ON(ipvlan->ipv4cnt < 0);
- if (!ipvlan->ipv4cnt)
- ipvlan_set_broadcast_mac_filter(ipvlan, false);
kfree_rcu(addr, rcu);
return;
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index f6c916312577..25f21968fa5c 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -848,7 +848,9 @@ static void irda_usb_receive(struct urb *urb)
* Jean II */
self->rx_defer_timer.function = irda_usb_rx_defer_expired;
self->rx_defer_timer.data = (unsigned long) urb;
- mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
+ mod_timer(&self->rx_defer_timer,
+ jiffies + msecs_to_jiffies(10));
+
return;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 8c350c5d54ad..483afb19596c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -476,7 +476,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
err = -ENOMEM;
q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
- &macvtap_proto);
+ &macvtap_proto, 0);
if (!q)
goto out;
@@ -1006,6 +1006,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
unsigned int __user *up = argp;
unsigned short u;
int __user *sp = argp;
+ struct sockaddr sa;
int s;
int ret;
@@ -1101,6 +1102,37 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
rtnl_unlock();
return ret;
+ case SIOCGIFHWADDR:
+ rtnl_lock();
+ vlan = macvtap_get_vlan(q);
+ if (!vlan) {
+ rtnl_unlock();
+ return -ENOLINK;
+ }
+ ret = 0;
+ u = vlan->dev->type;
+ if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
+ copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
+ put_user(u, &ifr->ifr_hwaddr.sa_family))
+ ret = -EFAULT;
+ macvtap_put_vlan(vlan);
+ rtnl_unlock();
+ return ret;
+
+ case SIOCSIFHWADDR:
+ if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
+ return -EFAULT;
+ rtnl_lock();
+ vlan = macvtap_get_vlan(q);
+ if (!vlan) {
+ rtnl_unlock();
+ return -ENOLINK;
+ }
+ ret = dev_set_mac_address(vlan->dev, &sa);
+ macvtap_put_vlan(vlan);
+ rtnl_unlock();
+ return ret;
+
default:
return -EINVAL;
}
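From userspace, these handlers make a macvtap file descriptor answer the usual hardware-address ioctls. A minimal sketch, assuming tap_fd is an open /dev/tapN descriptor; not part of the patch, with error handling condensed:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int example_get_hwaddr(int tap_fd, unsigned char mac[6])
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	/* The kernel fills in ifr_name, ifr_hwaddr and the link type */
	if (ioctl(tap_fd, SIOCGIFHWADDR, &ifr) < 0)
		return -1;
	memcpy(mac, ifr.ifr_hwaddr.sa_data, 6);
	return 0;
}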
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 70641d2c0429..cf18940f4e84 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -24,13 +24,6 @@ config AMD_PHY
---help---
Currently supports the am79c874
-config AMD_XGBE_PHY
- tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
- depends on (OF || ACPI) && HAS_IOMEM
- depends on ARM64 || COMPILE_TEST
- ---help---
- Currently supports the AMD 10GbE PHY
-
config MARVELL_PHY
tristate "Drivers for Marvell PHYs"
---help---
@@ -119,6 +112,11 @@ config MICREL_PHY
---help---
Supports the KSZ9021, VSC8201, KS8001 PHYs.
+config DP83867_PHY
+ tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
+ ---help---
+ Currently supports the DP83867 PHY.
+
config FIXED_PHY
tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
@@ -212,7 +210,6 @@ config MDIO_BCM_UNIMAC
This hardware can be found in the Broadcom GENET Ethernet MAC
controllers as well as some Broadcom Ethernet switches such as the
Starfighter 2 switches.
-
endif # PHYLIB
config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 501ea7699a2d..fcc25a0c45cd 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
+obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
@@ -33,5 +34,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
-obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
deleted file mode 100644
index 34a75cba3b73..000000000000
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ /dev/null
@@ -1,1901 +0,0 @@
-/*
- * AMD 10Gb Ethernet PHY driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/workqueue.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/phy.h>
-#include <linux/mdio.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <linux/property.h>
-#include <linux/acpi.h>
-#include <linux/jiffies.h>
-
-MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION("1.0.0-a");
-MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
-
-#define XGBE_PHY_ID 0x000162d0
-#define XGBE_PHY_MASK 0xfffffff0
-
-#define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set"
-#define XGBE_PHY_BLWC_PROPERTY "amd,serdes-blwc"
-#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
-#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
-#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
-#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
-#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
-
-#define XGBE_PHY_SPEEDS 3
-#define XGBE_PHY_SPEED_1000 0
-#define XGBE_PHY_SPEED_2500 1
-#define XGBE_PHY_SPEED_10000 2
-
-#define XGBE_AN_MS_TIMEOUT 500
-
-#define XGBE_AN_INT_CMPLT 0x01
-#define XGBE_AN_INC_LINK 0x02
-#define XGBE_AN_PG_RCV 0x04
-#define XGBE_AN_INT_MASK 0x07
-
-#define XNP_MCF_NULL_MESSAGE 0x001
-#define XNP_ACK_PROCESSED BIT(12)
-#define XNP_MP_FORMATTED BIT(13)
-#define XNP_NP_EXCHANGE BIT(15)
-
-#define XGBE_PHY_RATECHANGE_COUNT 500
-
-#define XGBE_PHY_KR_TRAINING_START 0x01
-#define XGBE_PHY_KR_TRAINING_ENABLE 0x02
-
-#define XGBE_PHY_FEC_ENABLE 0x01
-#define XGBE_PHY_FEC_FORWARD 0x02
-#define XGBE_PHY_FEC_MASK 0x03
-
-#ifndef MDIO_PMA_10GBR_PMD_CTRL
-#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
-#endif
-
-#ifndef MDIO_PMA_10GBR_FEC_ABILITY
-#define MDIO_PMA_10GBR_FEC_ABILITY 0x00aa
-#endif
-
-#ifndef MDIO_PMA_10GBR_FEC_CTRL
-#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab
-#endif
-
-#ifndef MDIO_AN_XNP
-#define MDIO_AN_XNP 0x0016
-#endif
-
-#ifndef MDIO_AN_LPX
-#define MDIO_AN_LPX 0x0019
-#endif
-
-#ifndef MDIO_AN_INTMASK
-#define MDIO_AN_INTMASK 0x8001
-#endif
-
-#ifndef MDIO_AN_INT
-#define MDIO_AN_INT 0x8002
-#endif
-
-#ifndef MDIO_CTRL1_SPEED1G
-#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
-#endif
-
-/* SerDes integration register offsets */
-#define SIR0_KR_RT_1 0x002c
-#define SIR0_STATUS 0x0040
-#define SIR1_SPEED 0x0000
-
-/* SerDes integration register entry bit positions and sizes */
-#define SIR0_KR_RT_1_RESET_INDEX 11
-#define SIR0_KR_RT_1_RESET_WIDTH 1
-#define SIR0_STATUS_RX_READY_INDEX 0
-#define SIR0_STATUS_RX_READY_WIDTH 1
-#define SIR0_STATUS_TX_READY_INDEX 8
-#define SIR0_STATUS_TX_READY_WIDTH 1
-#define SIR1_SPEED_CDR_RATE_INDEX 12
-#define SIR1_SPEED_CDR_RATE_WIDTH 4
-#define SIR1_SPEED_DATARATE_INDEX 4
-#define SIR1_SPEED_DATARATE_WIDTH 2
-#define SIR1_SPEED_PLLSEL_INDEX 3
-#define SIR1_SPEED_PLLSEL_WIDTH 1
-#define SIR1_SPEED_RATECHANGE_INDEX 6
-#define SIR1_SPEED_RATECHANGE_WIDTH 1
-#define SIR1_SPEED_TXAMP_INDEX 8
-#define SIR1_SPEED_TXAMP_WIDTH 4
-#define SIR1_SPEED_WORDMODE_INDEX 0
-#define SIR1_SPEED_WORDMODE_WIDTH 3
-
-#define SPEED_10000_BLWC 0
-#define SPEED_10000_CDR 0x7
-#define SPEED_10000_PLL 0x1
-#define SPEED_10000_PQ 0x12
-#define SPEED_10000_RATE 0x0
-#define SPEED_10000_TXAMP 0xa
-#define SPEED_10000_WORD 0x7
-#define SPEED_10000_DFE_TAP_CONFIG 0x1
-#define SPEED_10000_DFE_TAP_ENABLE 0x7f
-
-#define SPEED_2500_BLWC 1
-#define SPEED_2500_CDR 0x2
-#define SPEED_2500_PLL 0x0
-#define SPEED_2500_PQ 0xa
-#define SPEED_2500_RATE 0x1
-#define SPEED_2500_TXAMP 0xf
-#define SPEED_2500_WORD 0x1
-#define SPEED_2500_DFE_TAP_CONFIG 0x3
-#define SPEED_2500_DFE_TAP_ENABLE 0x0
-
-#define SPEED_1000_BLWC 1
-#define SPEED_1000_CDR 0x2
-#define SPEED_1000_PLL 0x0
-#define SPEED_1000_PQ 0xa
-#define SPEED_1000_RATE 0x3
-#define SPEED_1000_TXAMP 0xf
-#define SPEED_1000_WORD 0x1
-#define SPEED_1000_DFE_TAP_CONFIG 0x3
-#define SPEED_1000_DFE_TAP_ENABLE 0x0
-
-/* SerDes RxTx register offsets */
-#define RXTX_REG6 0x0018
-#define RXTX_REG20 0x0050
-#define RXTX_REG22 0x0058
-#define RXTX_REG114 0x01c8
-#define RXTX_REG129 0x0204
-
-/* SerDes RxTx register entry bit positions and sizes */
-#define RXTX_REG6_RESETB_RXD_INDEX 8
-#define RXTX_REG6_RESETB_RXD_WIDTH 1
-#define RXTX_REG20_BLWC_ENA_INDEX 2
-#define RXTX_REG20_BLWC_ENA_WIDTH 1
-#define RXTX_REG114_PQ_REG_INDEX 9
-#define RXTX_REG114_PQ_REG_WIDTH 7
-#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
-#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
-
-/* Bit setting and getting macros
- * The get macro will extract the current bit field value from within
- * the variable
- *
- * The set macro will clear the current bit field value within the
- * variable and then set the bit field of the variable to the
- * specified value
- */
-#define GET_BITS(_var, _index, _width) \
- (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
-
-#define SET_BITS(_var, _index, _width, _val) \
-do { \
- (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
- (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
-} while (0)
-
-#define XSIR_GET_BITS(_var, _prefix, _field) \
- GET_BITS((_var), \
- _prefix##_##_field##_INDEX, \
- _prefix##_##_field##_WIDTH)
-
-#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
- SET_BITS((_var), \
- _prefix##_##_field##_INDEX, \
- _prefix##_##_field##_WIDTH, (_val))
-
-/* Macros for reading or writing SerDes integration registers
- * The ioread macros will get bit fields or full values using the
- * register definitions formed using the input names
- *
- * The iowrite macros will set bit fields or full values using the
- * register definitions formed using the input names
- */
-#define XSIR0_IOREAD(_priv, _reg) \
- ioread16((_priv)->sir0_regs + _reg)
-
-#define XSIR0_IOREAD_BITS(_priv, _reg, _field) \
- GET_BITS(XSIR0_IOREAD((_priv), _reg), \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH)
-
-#define XSIR0_IOWRITE(_priv, _reg, _val) \
- iowrite16((_val), (_priv)->sir0_regs + _reg)
-
-#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val) \
-do { \
- u16 reg_val = XSIR0_IOREAD((_priv), _reg); \
- SET_BITS(reg_val, \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH, (_val)); \
- XSIR0_IOWRITE((_priv), _reg, reg_val); \
-} while (0)
-
-#define XSIR1_IOREAD(_priv, _reg) \
- ioread16((_priv)->sir1_regs + _reg)
-
-#define XSIR1_IOREAD_BITS(_priv, _reg, _field) \
- GET_BITS(XSIR1_IOREAD((_priv), _reg), \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH)
-
-#define XSIR1_IOWRITE(_priv, _reg, _val) \
- iowrite16((_val), (_priv)->sir1_regs + _reg)
-
-#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val) \
-do { \
- u16 reg_val = XSIR1_IOREAD((_priv), _reg); \
- SET_BITS(reg_val, \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH, (_val)); \
- XSIR1_IOWRITE((_priv), _reg, reg_val); \
-} while (0)
-
-/* Macros for reading or writing SerDes RxTx registers
- * The ioread macros will get bit fields or full values using the
- * register definitions formed using the input names
- *
- * The iowrite macros will set bit fields or full values using the
- * register definitions formed using the input names
- */
-#define XRXTX_IOREAD(_priv, _reg) \
- ioread16((_priv)->rxtx_regs + _reg)
-
-#define XRXTX_IOREAD_BITS(_priv, _reg, _field) \
- GET_BITS(XRXTX_IOREAD((_priv), _reg), \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH)
-
-#define XRXTX_IOWRITE(_priv, _reg, _val) \
- iowrite16((_val), (_priv)->rxtx_regs + _reg)
-
-#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val) \
-do { \
- u16 reg_val = XRXTX_IOREAD((_priv), _reg); \
- SET_BITS(reg_val, \
- _reg##_##_field##_INDEX, \
- _reg##_##_field##_WIDTH, (_val)); \
- XRXTX_IOWRITE((_priv), _reg, reg_val); \
-} while (0)
-
-static const u32 amd_xgbe_phy_serdes_blwc[] = {
- SPEED_1000_BLWC,
- SPEED_2500_BLWC,
- SPEED_10000_BLWC,
-};
-
-static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
- SPEED_1000_CDR,
- SPEED_2500_CDR,
- SPEED_10000_CDR,
-};
-
-static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
- SPEED_1000_PQ,
- SPEED_2500_PQ,
- SPEED_10000_PQ,
-};
-
-static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
- SPEED_1000_TXAMP,
- SPEED_2500_TXAMP,
- SPEED_10000_TXAMP,
-};
-
-static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
- SPEED_1000_DFE_TAP_CONFIG,
- SPEED_2500_DFE_TAP_CONFIG,
- SPEED_10000_DFE_TAP_CONFIG,
-};
-
-static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
- SPEED_1000_DFE_TAP_ENABLE,
- SPEED_2500_DFE_TAP_ENABLE,
- SPEED_10000_DFE_TAP_ENABLE,
-};
-
-enum amd_xgbe_phy_an {
- AMD_XGBE_AN_READY = 0,
- AMD_XGBE_AN_PAGE_RECEIVED,
- AMD_XGBE_AN_INCOMPAT_LINK,
- AMD_XGBE_AN_COMPLETE,
- AMD_XGBE_AN_NO_LINK,
- AMD_XGBE_AN_ERROR,
-};
-
-enum amd_xgbe_phy_rx {
- AMD_XGBE_RX_BPA = 0,
- AMD_XGBE_RX_XNP,
- AMD_XGBE_RX_COMPLETE,
- AMD_XGBE_RX_ERROR,
-};
-
-enum amd_xgbe_phy_mode {
- AMD_XGBE_MODE_KR,
- AMD_XGBE_MODE_KX,
-};
-
-enum amd_xgbe_phy_speedset {
- AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
- AMD_XGBE_PHY_SPEEDSET_2500_10000,
-};
-
-struct amd_xgbe_phy_priv {
- struct platform_device *pdev;
- struct acpi_device *adev;
- struct device *dev;
-
- struct phy_device *phydev;
-
- /* SerDes related mmio resources */
- struct resource *rxtx_res;
- struct resource *sir0_res;
- struct resource *sir1_res;
-
- /* SerDes related mmio registers */
- void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
- void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
- void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
-
- int an_irq;
- char an_irq_name[IFNAMSIZ + 32];
- struct work_struct an_irq_work;
- unsigned int an_irq_allocated;
-
- unsigned int speed_set;
-
- /* SerDes UEFI configurable settings.
- * Switching between modes/speeds requires new values for some
- * SerDes settings. The values can be supplied as device
- * properties in array format. The first array entry is for
- * 1GbE, second for 2.5GbE and third for 10GbE
- */
- u32 serdes_blwc[XGBE_PHY_SPEEDS];
- u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
- u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
- u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
- u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
- u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
-
- /* Auto-negotiation state machine support */
- struct mutex an_mutex;
- enum amd_xgbe_phy_an an_result;
- enum amd_xgbe_phy_an an_state;
- enum amd_xgbe_phy_rx kr_state;
- enum amd_xgbe_phy_rx kx_state;
- struct work_struct an_work;
- struct workqueue_struct *an_workqueue;
- unsigned int an_supported;
- unsigned int parallel_detect;
- unsigned int fec_ability;
- unsigned long an_start;
-
- unsigned int lpm_ctrl; /* CTRL1 for resume */
-};
-
-static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
-{
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- if (ret < 0)
- return ret;
-
- ret |= XGBE_PHY_KR_TRAINING_ENABLE;
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
-
- return 0;
-}
-
-static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
-{
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- if (ret < 0)
- return ret;
-
- ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
-
- return 0;
-}
-
-static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
-{
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret |= MDIO_CTRL1_LPOWER;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- usleep_range(75, 100);
-
- ret &= ~MDIO_CTRL1_LPOWER;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- return 0;
-}
-
-static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
-
- /* Assert Rx and Tx ratechange */
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
-}
-
-static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- unsigned int wait;
- u16 status;
-
- /* Release Rx and Tx ratechange */
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
-
- /* Wait for Rx and Tx ready */
- wait = XGBE_PHY_RATECHANGE_COUNT;
- while (wait--) {
- usleep_range(50, 75);
-
- status = XSIR0_IOREAD(priv, SIR0_STATUS);
- if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
- XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
- goto rx_reset;
- }
-
- netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
- status);
-
-rx_reset:
- /* Perform Rx reset for the DFE changes */
- XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
-}
-
-static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* Enable KR training */
- ret = amd_xgbe_an_enable_kr_training(phydev);
- if (ret < 0)
- return ret;
-
- /* Set PCS to KR/10G speed */
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_PCS_CTRL2_TYPE;
- ret |= MDIO_PCS_CTRL2_10GBR;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_CTRL1_SPEEDSEL;
- ret |= MDIO_CTRL1_SPEED10G;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- ret = amd_xgbe_phy_pcs_power_cycle(phydev);
- if (ret < 0)
- return ret;
-
- /* Set SerDes to 10G speed */
- amd_xgbe_phy_serdes_start_ratechange(phydev);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
- priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
- priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
- priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
- priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
- priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
- XRXTX_IOWRITE(priv, RXTX_REG22,
- priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
-
- amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
- return 0;
-}
-
-static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* Disable KR training */
- ret = amd_xgbe_an_disable_kr_training(phydev);
- if (ret < 0)
- return ret;
-
- /* Set PCS to KX/1G speed */
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_PCS_CTRL2_TYPE;
- ret |= MDIO_PCS_CTRL2_10GBX;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_CTRL1_SPEEDSEL;
- ret |= MDIO_CTRL1_SPEED1G;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- ret = amd_xgbe_phy_pcs_power_cycle(phydev);
- if (ret < 0)
- return ret;
-
- /* Set SerDes to 2.5G speed */
- amd_xgbe_phy_serdes_start_ratechange(phydev);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
- priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
- priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
- priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
- priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
- priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
- XRXTX_IOWRITE(priv, RXTX_REG22,
- priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
-
- amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
- return 0;
-}
-
-static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* Disable KR training */
- ret = amd_xgbe_an_disable_kr_training(phydev);
- if (ret < 0)
- return ret;
-
- /* Set PCS to KX/1G speed */
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_PCS_CTRL2_TYPE;
- ret |= MDIO_PCS_CTRL2_10GBX;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_CTRL1_SPEEDSEL;
- ret |= MDIO_CTRL1_SPEED1G;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- ret = amd_xgbe_phy_pcs_power_cycle(phydev);
- if (ret < 0)
- return ret;
-
- /* Set SerDes to 1G speed */
- amd_xgbe_phy_serdes_start_ratechange(phydev);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
-
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
- priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
- XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
- priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
- priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
- priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
- XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
- priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
- XRXTX_IOWRITE(priv, RXTX_REG22,
- priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
-
- amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
- return 0;
-}
-
-static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
- enum amd_xgbe_phy_mode *mode)
-{
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
- if (ret < 0)
- return ret;
-
- if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
- *mode = AMD_XGBE_MODE_KR;
- else
- *mode = AMD_XGBE_MODE_KX;
-
- return 0;
-}
-
-static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
-{
- enum amd_xgbe_phy_mode mode;
-
- if (amd_xgbe_phy_cur_mode(phydev, &mode))
- return false;
-
- return (mode == AMD_XGBE_MODE_KR);
-}
-
-static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* If we are in KR switch to KX, and vice-versa */
- if (amd_xgbe_phy_in_kr_mode(phydev)) {
- if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
- ret = amd_xgbe_phy_gmii_mode(phydev);
- else
- ret = amd_xgbe_phy_gmii_2500_mode(phydev);
- } else {
- ret = amd_xgbe_phy_xgmii_mode(phydev);
- }
-
- return ret;
-}
-
-static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
- enum amd_xgbe_phy_mode mode)
-{
- enum amd_xgbe_phy_mode cur_mode;
- int ret;
-
- ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
- if (ret)
- return ret;
-
- if (mode != cur_mode)
- ret = amd_xgbe_phy_switch_mode(phydev);
-
- return ret;
-}
-
-static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
-{
- if (phydev->autoneg == AUTONEG_ENABLE) {
- if (phydev->advertising & ADVERTISED_10000baseKR_Full)
- return true;
- } else {
- if (phydev->speed == SPEED_10000)
- return true;
- }
-
- return false;
-}
-
-static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
-{
- if (phydev->autoneg == AUTONEG_ENABLE) {
- if (phydev->advertising & ADVERTISED_2500baseX_Full)
- return true;
- } else {
- if (phydev->speed == SPEED_2500)
- return true;
- }
-
- return false;
-}
-
-static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
-{
- if (phydev->autoneg == AUTONEG_ENABLE) {
- if (phydev->advertising & ADVERTISED_1000baseKX_Full)
- return true;
- } else {
- if (phydev->speed == SPEED_1000)
- return true;
- }
-
- return false;
-}
-
-static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
- bool restart)
-{
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret &= ~MDIO_AN_CTRL1_ENABLE;
-
- if (enable)
- ret |= MDIO_AN_CTRL1_ENABLE;
-
- if (restart)
- ret |= MDIO_AN_CTRL1_RESTART;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
-
- return 0;
-}
-
-static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
-{
- return amd_xgbe_phy_set_an(phydev, true, true);
-}
-
-static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
-{
- return amd_xgbe_phy_set_an(phydev, false, false);
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
- enum amd_xgbe_phy_rx *state)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ad_reg, lp_reg, ret;
-
- *state = AMD_XGBE_RX_COMPLETE;
-
- /* If we're not in KR mode then we're done */
- if (!amd_xgbe_phy_in_kr_mode(phydev))
- return AMD_XGBE_AN_PAGE_RECEIVED;
-
- /* Enable/Disable FEC */
- ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
- if (ad_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
- if (lp_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
- if (ret < 0)
- return AMD_XGBE_AN_ERROR;
-
- ret &= ~XGBE_PHY_FEC_MASK;
- if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
- ret |= priv->fec_ability;
-
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
-
- /* Start KR training */
- ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
- if (ret < 0)
- return AMD_XGBE_AN_ERROR;
-
- if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
-
- ret |= XGBE_PHY_KR_TRAINING_START;
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
- ret);
-
- XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
- }
-
- return AMD_XGBE_AN_PAGE_RECEIVED;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
- enum amd_xgbe_phy_rx *state)
-{
- u16 msg;
-
- *state = AMD_XGBE_RX_XNP;
-
- msg = XNP_MCF_NULL_MESSAGE;
- msg |= XNP_MP_FORMATTED;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
-
- return AMD_XGBE_AN_PAGE_RECEIVED;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
- enum amd_xgbe_phy_rx *state)
-{
- unsigned int link_support;
- int ret, ad_reg, lp_reg;
-
- /* Read Base Ability register 2 first */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
- if (ret < 0)
- return AMD_XGBE_AN_ERROR;
-
- /* Check for a supported mode, otherwise restart in a different one */
- link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
- if (!(ret & link_support))
- return AMD_XGBE_AN_INCOMPAT_LINK;
-
- /* Check Extended Next Page support */
- ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
- if (ad_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
- if (lp_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
- amd_xgbe_an_tx_xnp(phydev, state) :
- amd_xgbe_an_tx_training(phydev, state);
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
- enum amd_xgbe_phy_rx *state)
-{
- int ad_reg, lp_reg;
-
- /* Check Extended Next Page support */
- ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
- if (ad_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
- if (lp_reg < 0)
- return AMD_XGBE_AN_ERROR;
-
- return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
- amd_xgbe_an_tx_xnp(phydev, state) :
- amd_xgbe_an_tx_training(phydev, state);
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- enum amd_xgbe_phy_rx *state;
- unsigned long an_timeout;
- int ret;
-
- if (!priv->an_start) {
- priv->an_start = jiffies;
- } else {
- an_timeout = priv->an_start +
- msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
- if (time_after(jiffies, an_timeout)) {
- /* Auto-negotiation timed out, reset state */
- priv->kr_state = AMD_XGBE_RX_BPA;
- priv->kx_state = AMD_XGBE_RX_BPA;
-
- priv->an_start = jiffies;
- }
- }
-
- state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
- : &priv->kx_state;
-
- switch (*state) {
- case AMD_XGBE_RX_BPA:
- ret = amd_xgbe_an_rx_bpa(phydev, state);
- break;
-
- case AMD_XGBE_RX_XNP:
- ret = amd_xgbe_an_rx_xnp(phydev, state);
- break;
-
- default:
- ret = AMD_XGBE_AN_ERROR;
- }
-
- return ret;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* Be sure we aren't looping trying to negotiate */
- if (amd_xgbe_phy_in_kr_mode(phydev)) {
- priv->kr_state = AMD_XGBE_RX_ERROR;
-
- if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
- !(phydev->advertising & SUPPORTED_2500baseX_Full))
- return AMD_XGBE_AN_NO_LINK;
-
- if (priv->kx_state != AMD_XGBE_RX_BPA)
- return AMD_XGBE_AN_NO_LINK;
- } else {
- priv->kx_state = AMD_XGBE_RX_ERROR;
-
- if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
- return AMD_XGBE_AN_NO_LINK;
-
- if (priv->kr_state != AMD_XGBE_RX_BPA)
- return AMD_XGBE_AN_NO_LINK;
- }
-
- ret = amd_xgbe_phy_disable_an(phydev);
- if (ret)
- return AMD_XGBE_AN_ERROR;
-
- ret = amd_xgbe_phy_switch_mode(phydev);
- if (ret)
- return AMD_XGBE_AN_ERROR;
-
- ret = amd_xgbe_phy_restart_an(phydev);
- if (ret)
- return AMD_XGBE_AN_ERROR;
-
- return AMD_XGBE_AN_INCOMPAT_LINK;
-}
-
-static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
-{
- struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;
-
- /* Interrupt reason must be read and cleared outside of IRQ context */
- disable_irq_nosync(priv->an_irq);
-
- queue_work(priv->an_workqueue, &priv->an_irq_work);
-
- return IRQ_HANDLED;
-}
-
-static void amd_xgbe_an_irq_work(struct work_struct *work)
-{
- struct amd_xgbe_phy_priv *priv = container_of(work,
- struct amd_xgbe_phy_priv,
- an_irq_work);
-
- /* Avoid a race between enabling the IRQ and exiting the work by
- * waiting for the work to finish and then queueing it
- */
- flush_work(&priv->an_work);
- queue_work(priv->an_workqueue, &priv->an_work);
-}
-
-static void amd_xgbe_an_state_machine(struct work_struct *work)
-{
- struct amd_xgbe_phy_priv *priv = container_of(work,
- struct amd_xgbe_phy_priv,
- an_work);
- struct phy_device *phydev = priv->phydev;
- enum amd_xgbe_phy_an cur_state = priv->an_state;
- int int_reg, int_mask;
-
- mutex_lock(&priv->an_mutex);
-
- /* Read the interrupt */
- int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
- if (!int_reg)
- goto out;
-
-next_int:
- if (int_reg < 0) {
- priv->an_state = AMD_XGBE_AN_ERROR;
- int_mask = XGBE_AN_INT_MASK;
- } else if (int_reg & XGBE_AN_PG_RCV) {
- priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
- int_mask = XGBE_AN_PG_RCV;
- } else if (int_reg & XGBE_AN_INC_LINK) {
- priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
- int_mask = XGBE_AN_INC_LINK;
- } else if (int_reg & XGBE_AN_INT_CMPLT) {
- priv->an_state = AMD_XGBE_AN_COMPLETE;
- int_mask = XGBE_AN_INT_CMPLT;
- } else {
- priv->an_state = AMD_XGBE_AN_ERROR;
- int_mask = 0;
- }
-
- /* Clear the interrupt to be processed */
- int_reg &= ~int_mask;
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
-
- priv->an_result = priv->an_state;
-
-again:
- cur_state = priv->an_state;
-
- switch (priv->an_state) {
- case AMD_XGBE_AN_READY:
- priv->an_supported = 0;
- break;
-
- case AMD_XGBE_AN_PAGE_RECEIVED:
- priv->an_state = amd_xgbe_an_page_received(phydev);
- priv->an_supported++;
- break;
-
- case AMD_XGBE_AN_INCOMPAT_LINK:
- priv->an_supported = 0;
- priv->parallel_detect = 0;
- priv->an_state = amd_xgbe_an_incompat_link(phydev);
- break;
-
- case AMD_XGBE_AN_COMPLETE:
- priv->parallel_detect = priv->an_supported ? 0 : 1;
- netdev_dbg(phydev->attached_dev, "%s successful\n",
- priv->an_supported ? "Auto negotiation"
- : "Parallel detection");
- break;
-
- case AMD_XGBE_AN_NO_LINK:
- break;
-
- default:
- priv->an_state = AMD_XGBE_AN_ERROR;
- }
-
- if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
- int_reg = 0;
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
- } else if (priv->an_state == AMD_XGBE_AN_ERROR) {
- netdev_err(phydev->attached_dev,
- "error during auto-negotiation, state=%u\n",
- cur_state);
-
- int_reg = 0;
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
- }
-
- if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
- priv->an_result = priv->an_state;
- priv->an_state = AMD_XGBE_AN_READY;
- priv->kr_state = AMD_XGBE_RX_BPA;
- priv->kx_state = AMD_XGBE_RX_BPA;
- priv->an_start = 0;
- }
-
- if (cur_state != priv->an_state)
- goto again;
-
- if (int_reg)
- goto next_int;
-
-out:
- enable_irq(priv->an_irq);
-
- mutex_unlock(&priv->an_mutex);
-}
-
-static int amd_xgbe_an_init(struct phy_device *phydev)
-{
- int ret;
-
- /* Set up Advertisement register 3 first */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
- if (ret < 0)
- return ret;
-
- if (phydev->advertising & SUPPORTED_10000baseR_FEC)
- ret |= 0xc000;
- else
- ret &= ~0xc000;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
-
- /* Set up Advertisement register 2 next */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
- if (ret < 0)
- return ret;
-
- if (phydev->advertising & SUPPORTED_10000baseKR_Full)
- ret |= 0x80;
- else
- ret &= ~0x80;
-
- if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
- (phydev->advertising & SUPPORTED_2500baseX_Full))
- ret |= 0x20;
- else
- ret &= ~0x20;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
-
- /* Set up Advertisement register 1 last */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
- if (ret < 0)
- return ret;
-
- if (phydev->advertising & SUPPORTED_Pause)
- ret |= 0x400;
- else
- ret &= ~0x400;
-
- if (phydev->advertising & SUPPORTED_Asym_Pause)
- ret |= 0x800;
- else
- ret &= ~0x800;
-
- /* We don't intend to perform XNP */
- ret &= ~XNP_NP_EXCHANGE;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
-
- return 0;
-}
-
-static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
-{
- int count, ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
-
- ret |= MDIO_CTRL1_RESET;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- count = 50;
- do {
- msleep(20);
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- return ret;
- } while ((ret & MDIO_CTRL1_RESET) && --count);
-
- if (ret & MDIO_CTRL1_RESET)
- return -ETIMEDOUT;
-
- /* Disable auto-negotiation for now */
- ret = amd_xgbe_phy_disable_an(phydev);
- if (ret < 0)
- return ret;
-
- /* Clear auto-negotiation interrupts */
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-
- return 0;
-}
-
-static int amd_xgbe_phy_config_init(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- struct net_device *netdev = phydev->attached_dev;
- int ret;
-
- if (!priv->an_irq_allocated) {
- /* Allocate the auto-negotiation workqueue and interrupt */
- snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
- "%s-pcs", netdev_name(netdev));
-
- priv->an_workqueue =
- create_singlethread_workqueue(priv->an_irq_name);
- if (!priv->an_workqueue) {
- netdev_err(netdev, "phy workqueue creation failed\n");
- return -ENOMEM;
- }
-
- ret = devm_request_irq(priv->dev, priv->an_irq,
- amd_xgbe_an_isr, 0, priv->an_irq_name,
- priv);
- if (ret) {
- netdev_err(netdev, "phy irq request failed\n");
- destroy_workqueue(priv->an_workqueue);
- return ret;
- }
-
- priv->an_irq_allocated = 1;
- }
-
- /* Set initial mode - call the mode setting routines
- * directly to ensure we are properly configured
- */
- if (amd_xgbe_phy_use_xgmii_mode(phydev))
- ret = amd_xgbe_phy_xgmii_mode(phydev);
- else if (amd_xgbe_phy_use_gmii_mode(phydev))
- ret = amd_xgbe_phy_gmii_mode(phydev);
- else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
- ret = amd_xgbe_phy_gmii_2500_mode(phydev);
- else
- ret = -EINVAL;
- if (ret < 0)
- return ret;
-
- /* Set up advertisement registers based on current settings */
- ret = amd_xgbe_an_init(phydev);
- if (ret)
- return ret;
-
- /* Enable auto-negotiation interrupts */
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
-
- return 0;
-}
-
-static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
-{
- int ret;
-
- /* Disable auto-negotiation */
- ret = amd_xgbe_phy_disable_an(phydev);
- if (ret < 0)
- return ret;
-
- /* Validate/Set specified speed */
- switch (phydev->speed) {
- case SPEED_10000:
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
- break;
-
- case SPEED_2500:
- case SPEED_1000:
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
- break;
-
- default:
- ret = -EINVAL;
- }
-
- if (ret < 0)
- return ret;
-
- /* Validate duplex mode */
- if (phydev->duplex != DUPLEX_FULL)
- return -EINVAL;
-
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
- return 0;
-}
-
-static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- u32 mmd_mask = phydev->c45_ids.devices_in_package;
- int ret;
-
- if (phydev->autoneg != AUTONEG_ENABLE)
- return amd_xgbe_phy_setup_forced(phydev);
-
- /* Make sure we have the AN MMD present */
- if (!(mmd_mask & MDIO_DEVS_AN))
- return -EINVAL;
-
- /* Disable auto-negotiation interrupt */
- disable_irq(priv->an_irq);
-
- /* Start auto-negotiation in a supported mode */
- if (phydev->advertising & SUPPORTED_10000baseKR_Full)
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
- else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
- (phydev->advertising & SUPPORTED_2500baseX_Full))
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
- else
- ret = -EINVAL;
- if (ret < 0) {
- enable_irq(priv->an_irq);
- return ret;
- }
-
- /* Disable and stop any in progress auto-negotiation */
- ret = amd_xgbe_phy_disable_an(phydev);
- if (ret < 0)
- return ret;
-
- /* Clear any auto-negotiation interrupts */
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-
- priv->an_result = AMD_XGBE_AN_READY;
- priv->an_state = AMD_XGBE_AN_READY;
- priv->kr_state = AMD_XGBE_RX_BPA;
- priv->kx_state = AMD_XGBE_RX_BPA;
-
- /* Re-enable auto-negotiation interrupt */
- enable_irq(priv->an_irq);
-
- /* Set up advertisement registers based on current settings */
- ret = amd_xgbe_an_init(phydev);
- if (ret)
- return ret;
-
- /* Enable and start auto-negotiation */
- return amd_xgbe_phy_restart_an(phydev);
-}
-
-static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- mutex_lock(&priv->an_mutex);
-
- ret = __amd_xgbe_phy_config_aneg(phydev);
-
- mutex_unlock(&priv->an_mutex);
-
- return ret;
-}
-
-static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
-
- return (priv->an_result == AMD_XGBE_AN_COMPLETE);
-}
-
-static int amd_xgbe_phy_update_link(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- /* If we're doing auto-negotiation don't report link down */
- if (priv->an_state != AMD_XGBE_AN_READY) {
- phydev->link = 1;
- return 0;
- }
-
- /* Link status is latched low, so read once to clear
- * and then read again to get current state
- */
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
- if (ret < 0)
- return ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
- if (ret < 0)
- return ret;
-
- phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
-
- return 0;
-}
-
-static int amd_xgbe_phy_read_status(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- u32 mmd_mask = phydev->c45_ids.devices_in_package;
- int ret, ad_ret, lp_ret;
-
- ret = amd_xgbe_phy_update_link(phydev);
- if (ret)
- return ret;
-
- if ((phydev->autoneg == AUTONEG_ENABLE) &&
- !priv->parallel_detect) {
- if (!(mmd_mask & MDIO_DEVS_AN))
- return -EINVAL;
-
- if (!amd_xgbe_phy_aneg_done(phydev))
- return 0;
-
- /* Compare Advertisement and Link Partner register 1 */
- ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
- if (ad_ret < 0)
- return ad_ret;
- lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
- if (lp_ret < 0)
- return lp_ret;
-
- ad_ret &= lp_ret;
- phydev->pause = (ad_ret & 0x400) ? 1 : 0;
- phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;
-
- /* Compare Advertisement and Link Partner register 2 */
- ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
- MDIO_AN_ADVERTISE + 1);
- if (ad_ret < 0)
- return ad_ret;
- lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
- if (lp_ret < 0)
- return lp_ret;
-
- ad_ret &= lp_ret;
- if (ad_ret & 0x80) {
- phydev->speed = SPEED_10000;
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
- if (ret)
- return ret;
- } else {
- switch (priv->speed_set) {
- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
- phydev->speed = SPEED_1000;
- break;
-
- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
- phydev->speed = SPEED_2500;
- break;
- }
-
- ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
- if (ret)
- return ret;
- }
-
- phydev->duplex = DUPLEX_FULL;
- } else {
- if (amd_xgbe_phy_in_kr_mode(phydev)) {
- phydev->speed = SPEED_10000;
- } else {
- switch (priv->speed_set) {
- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
- phydev->speed = SPEED_1000;
- break;
-
- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
- phydev->speed = SPEED_2500;
- break;
- }
- }
- phydev->duplex = DUPLEX_FULL;
- phydev->pause = 0;
- phydev->asym_pause = 0;
- }
-
- return 0;
-}
-
-static int amd_xgbe_phy_suspend(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- int ret;
-
- mutex_lock(&phydev->lock);
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
- if (ret < 0)
- goto unlock;
-
- priv->lpm_ctrl = ret;
-
- ret |= MDIO_CTRL1_LPOWER;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
- ret = 0;
-
-unlock:
- mutex_unlock(&phydev->lock);
-
- return ret;
-}
-
-static int amd_xgbe_phy_resume(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
-
- mutex_lock(&phydev->lock);
-
- priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);
-
- mutex_unlock(&phydev->lock);
-
- return 0;
-}
-
-static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
- unsigned int type)
-{
- unsigned int count;
- int i;
-
- for (i = 0, count = 0; i < pdev->num_resources; i++) {
- struct resource *r = &pdev->resource[i];
-
- if (type == resource_type(r))
- count++;
- }
-
- return count;
-}
-
-static int amd_xgbe_phy_probe(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv;
- struct platform_device *phy_pdev;
- struct device *dev, *phy_dev;
- unsigned int phy_resnum, phy_irqnum;
- int ret;
-
- if (!phydev->bus || !phydev->bus->parent)
- return -EINVAL;
-
- dev = phydev->bus->parent;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->pdev = to_platform_device(dev);
- priv->adev = ACPI_COMPANION(dev);
- priv->dev = dev;
- priv->phydev = phydev;
- mutex_init(&priv->an_mutex);
- INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
- INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
-
- if (!priv->adev || acpi_disabled) {
- struct device_node *bus_node;
- struct device_node *phy_node;
-
- bus_node = priv->dev->of_node;
- phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
- if (!phy_node) {
- dev_err(dev, "unable to parse phy-handle\n");
- ret = -EINVAL;
- goto err_priv;
- }
-
- phy_pdev = of_find_device_by_node(phy_node);
- of_node_put(phy_node);
-
- if (!phy_pdev) {
- dev_err(dev, "unable to obtain phy device\n");
- ret = -EINVAL;
- goto err_priv;
- }
-
- phy_resnum = 0;
- phy_irqnum = 0;
- } else {
- /* In ACPI, the XGBE and PHY resources are grouped
- * together, with the PHY resources at the end
- */
- phy_pdev = priv->pdev;
- phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
- IORESOURCE_MEM) - 3;
- phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
- IORESOURCE_IRQ) - 1;
- }
- phy_dev = &phy_pdev->dev;
-
- /* Get the device mmio areas */
- priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
- phy_resnum++);
- priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
- if (IS_ERR(priv->rxtx_regs)) {
- dev_err(dev, "rxtx ioremap failed\n");
- ret = PTR_ERR(priv->rxtx_regs);
- goto err_put;
- }
-
- priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
- phy_resnum++);
- priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
- if (IS_ERR(priv->sir0_regs)) {
- dev_err(dev, "sir0 ioremap failed\n");
- ret = PTR_ERR(priv->sir0_regs);
- goto err_rxtx;
- }
-
- priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
- phy_resnum++);
- priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
- if (IS_ERR(priv->sir1_regs)) {
- dev_err(dev, "sir1 ioremap failed\n");
- ret = PTR_ERR(priv->sir1_regs);
- goto err_sir0;
- }
-
- /* Get the auto-negotiation interrupt */
- ret = platform_get_irq(phy_pdev, phy_irqnum);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq failed\n");
- goto err_sir1;
- }
- priv->an_irq = ret;
-
- /* Get the device speed set property */
- ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
- &priv->speed_set);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_SPEEDSET_PROPERTY);
- goto err_sir1;
- }
-
- switch (priv->speed_set) {
- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
- break;
- default:
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_SPEEDSET_PROPERTY);
- ret = -EINVAL;
- goto err_sir1;
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_BLWC_PROPERTY,
- priv->serdes_blwc,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_BLWC_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
- sizeof(priv->serdes_blwc));
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_CDR_RATE_PROPERTY,
- priv->serdes_cdr_rate,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_CDR_RATE_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
- sizeof(priv->serdes_cdr_rate));
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_PQ_SKEW_PROPERTY,
- priv->serdes_pq_skew,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_PQ_SKEW_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
- sizeof(priv->serdes_pq_skew));
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_TX_AMP_PROPERTY,
- priv->serdes_tx_amp,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_TX_AMP_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
- sizeof(priv->serdes_tx_amp));
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_DFE_CFG_PROPERTY,
- priv->serdes_dfe_tap_cfg,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_DFE_CFG_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_dfe_tap_cfg,
- amd_xgbe_phy_serdes_dfe_tap_cfg,
- sizeof(priv->serdes_dfe_tap_cfg));
- }
-
- if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PHY_DFE_ENA_PROPERTY,
- priv->serdes_dfe_tap_ena,
- XGBE_PHY_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PHY_DFE_ENA_PROPERTY);
- goto err_sir1;
- }
- } else {
- memcpy(priv->serdes_dfe_tap_ena,
- amd_xgbe_phy_serdes_dfe_tap_ena,
- sizeof(priv->serdes_dfe_tap_ena));
- }
-
- /* Initialize supported features */
- phydev->supported = SUPPORTED_Autoneg;
- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phydev->supported |= SUPPORTED_Backplane;
- phydev->supported |= SUPPORTED_10000baseKR_Full;
- switch (priv->speed_set) {
- case AMD_XGBE_PHY_SPEEDSET_1000_10000:
- phydev->supported |= SUPPORTED_1000baseKX_Full;
- break;
- case AMD_XGBE_PHY_SPEEDSET_2500_10000:
- phydev->supported |= SUPPORTED_2500baseX_Full;
- break;
- }
-
- ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
- if (ret < 0)
- return ret;
- priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
- if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
- phydev->supported |= SUPPORTED_10000baseR_FEC;
-
- phydev->advertising = phydev->supported;
-
- phydev->priv = priv;
-
- if (!priv->adev || acpi_disabled)
- platform_device_put(phy_pdev);
-
- return 0;
-
-err_sir1:
- devm_iounmap(dev, priv->sir1_regs);
- devm_release_mem_region(dev, priv->sir1_res->start,
- resource_size(priv->sir1_res));
-
-err_sir0:
- devm_iounmap(dev, priv->sir0_regs);
- devm_release_mem_region(dev, priv->sir0_res->start,
- resource_size(priv->sir0_res));
-
-err_rxtx:
- devm_iounmap(dev, priv->rxtx_regs);
- devm_release_mem_region(dev, priv->rxtx_res->start,
- resource_size(priv->rxtx_res));
-
-err_put:
- if (!priv->adev || acpi_disabled)
- platform_device_put(phy_pdev);
-
-err_priv:
- devm_kfree(dev, priv);
-
- return ret;
-}
-
-static void amd_xgbe_phy_remove(struct phy_device *phydev)
-{
- struct amd_xgbe_phy_priv *priv = phydev->priv;
- struct device *dev = priv->dev;
-
- if (priv->an_irq_allocated) {
- devm_free_irq(dev, priv->an_irq, priv);
-
- flush_workqueue(priv->an_workqueue);
- destroy_workqueue(priv->an_workqueue);
- }
-
- /* Release resources */
- devm_iounmap(dev, priv->sir1_regs);
- devm_release_mem_region(dev, priv->sir1_res->start,
- resource_size(priv->sir1_res));
-
- devm_iounmap(dev, priv->sir0_regs);
- devm_release_mem_region(dev, priv->sir0_res->start,
- resource_size(priv->sir0_res));
-
- devm_iounmap(dev, priv->rxtx_regs);
- devm_release_mem_region(dev, priv->rxtx_res->start,
- resource_size(priv->rxtx_res));
-
- devm_kfree(dev, priv);
-}
-
-static int amd_xgbe_match_phy_device(struct phy_device *phydev)
-{
- return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
-}
-
-static struct phy_driver amd_xgbe_phy_driver[] = {
- {
- .phy_id = XGBE_PHY_ID,
- .phy_id_mask = XGBE_PHY_MASK,
- .name = "AMD XGBE PHY",
- .features = 0,
- .flags = PHY_IS_INTERNAL,
- .probe = amd_xgbe_phy_probe,
- .remove = amd_xgbe_phy_remove,
- .soft_reset = amd_xgbe_phy_soft_reset,
- .config_init = amd_xgbe_phy_config_init,
- .suspend = amd_xgbe_phy_suspend,
- .resume = amd_xgbe_phy_resume,
- .config_aneg = amd_xgbe_phy_config_aneg,
- .aneg_done = amd_xgbe_phy_aneg_done,
- .read_status = amd_xgbe_phy_read_status,
- .match_phy_device = amd_xgbe_match_phy_device,
- .driver = {
- .owner = THIS_MODULE,
- },
- },
-};
-
-module_phy_driver(amd_xgbe_phy_driver);
-
-static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
- { XGBE_PHY_ID, XGBE_PHY_MASK },
- { }
-};
-MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);
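
The driver removed above carried one pattern worth keeping in mind for any clause-45 PHY work: amd_xgbe_phy_update_link() read MDIO_STAT1 twice because the link-status bit is latched low, so the first read only reports whether the link dropped since the last read. A minimal stand-alone sketch of the same pattern, with mdio_read() as a stub standing in for a real MDIO accessor:

    #include <stdio.h>

    #define MDIO_STAT1_LSTATUS 0x0004   /* bit 2, per IEEE 802.3 clause 45 */

    /* Stub standing in for a real clause-45 MDIO read. */
    static int mdio_read(int mmd, int reg)
    {
        (void)mmd; (void)reg;
        return MDIO_STAT1_LSTATUS;      /* pretend the link is up */
    }

    static int link_is_up(void)
    {
        int val;

        val = mdio_read(3, 1);          /* first read clears the latch */
        if (val < 0)
            return val;
        val = mdio_read(3, 1);          /* second read is the current state */
        if (val < 0)
            return val;
        return !!(val & MDIO_STAT1_LSTATUS);
    }

    int main(void)
    {
        printf("link %s\n", link_is_up() ? "up" : "down");
        return 0;
    }
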
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b5dc59de094e..4dea85bfc545 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -136,8 +136,8 @@ static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
/* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */
phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
- /* AFE_TX_CONFIG, set 1000BT Cfeed=110 for all ports */
- phy_write_misc(phydev, AFE_TX_CONFIG, 0x0061);
+ /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
+ phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
@@ -167,6 +167,9 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
/* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */
phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+ /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
+ phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
+
/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 0d16c7d9e1bf..2a328703b4ae 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -158,6 +158,18 @@ static struct phy_driver dm91xx_driver[] = {
.config_intr = dm9161_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = 0x0181b8b0,
+ .name = "Davicom DM9161B/C",
+ .phy_id_mask = 0x0ffffff0,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = dm9161_config_init,
+ .config_aneg = dm9161_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = dm9161_ack_interrupt,
+ .config_intr = dm9161_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
@@ -186,6 +198,7 @@ module_phy_driver(dm91xx_driver);
static struct mdio_device_id __maybe_unused davicom_tbl[] = {
{ 0x0181b880, 0x0ffffff0 },
+ { 0x0181b8b0, 0x0ffffff0 },
{ 0x0181b8a0, 0x0ffffff0 },
{ 0x00181b80, 0x0ffffff0 },
{ }
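
The new 0x0181b8b0 entry relies on the PHY core's ID matching: the 0x0ffffff0 mask strips the low revision nibble, so every DM9161B/C revision maps to the one driver entry. A small sketch of that compare-under-mask rule, simplified from the real matching logic:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Match a device ID against a table entry under the entry's mask. */
    static bool phy_id_matches(uint32_t dev_id, uint32_t tbl_id, uint32_t mask)
    {
        return (dev_id & mask) == (tbl_id & mask);
    }

    int main(void)
    {
        /* Any DM9161B/C revision (low nibble) matches... */
        printf("%d\n", phy_id_matches(0x0181b8b3, 0x0181b8b0, 0x0ffffff0)); /* 1 */
        /* ...but the DM9161A ID does not. */
        printf("%d\n", phy_id_matches(0x0181b8a0, 0x0181b8b0, 0x0ffffff0)); /* 0 */
        return 0;
    }
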
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
new file mode 100644
index 000000000000..c7a12e2e07b7
--- /dev/null
+++ b/drivers/net/phy/dp83867.c
@@ -0,0 +1,239 @@
+/*
+ * Driver for the Texas Instruments DP83867 PHY
+ *
+ * Copyright (C) 2015 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#include <dt-bindings/net/ti-dp83867.h>
+
+#define DP83867_PHY_ID 0x2000a231
+#define DP83867_DEVADDR 0x1f
+
+#define MII_DP83867_PHYCTRL 0x10
+#define MII_DP83867_MICR 0x12
+#define MII_DP83867_ISR 0x13
+#define DP83867_CTRL 0x1f
+
+/* Extended Registers */
+#define DP83867_RGMIICTL 0x0032
+#define DP83867_RGMIIDCTL 0x0086
+
+#define DP83867_SW_RESET BIT(15)
+#define DP83867_SW_RESTART BIT(14)
+
+/* MICR Interrupt bits */
+#define MII_DP83867_MICR_AN_ERR_INT_EN BIT(15)
+#define MII_DP83867_MICR_SPEED_CHNG_INT_EN BIT(14)
+#define MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN BIT(13)
+#define MII_DP83867_MICR_PAGE_RXD_INT_EN BIT(12)
+#define MII_DP83867_MICR_AUTONEG_COMP_INT_EN BIT(11)
+#define MII_DP83867_MICR_LINK_STS_CHNG_INT_EN BIT(10)
+#define MII_DP83867_MICR_FALSE_CARRIER_INT_EN BIT(8)
+#define MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN BIT(4)
+#define MII_DP83867_MICR_WOL_INT_EN BIT(3)
+#define MII_DP83867_MICR_XGMII_ERR_INT_EN BIT(2)
+#define MII_DP83867_MICR_POL_CHNG_INT_EN BIT(1)
+#define MII_DP83867_MICR_JABBER_INT_EN BIT(0)
+
+/* RGMIICTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1)
+#define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0)
+
+/* PHY CTRL bits */
+#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+
+/* RGMIIDCTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
+
+struct dp83867_private {
+ u32 rx_id_delay;
+ u32 tx_id_delay;
+ u32 fifo_depth;
+};
+
+static int dp83867_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_DP83867_ISR);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dp83867_config_intr(struct phy_device *phydev)
+{
+ int micr_status;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ micr_status = phy_read(phydev, MII_DP83867_MICR);
+ if (micr_status < 0)
+ return micr_status;
+
+ micr_status |=
+ (MII_DP83867_MICR_AN_ERR_INT_EN |
+ MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+ MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
+ MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
+
+ return phy_write(phydev, MII_DP83867_MICR, micr_status);
+ }
+
+ micr_status = 0x0;
+ return phy_write(phydev, MII_DP83867_MICR, micr_status);
+}
+
+#ifdef CONFIG_OF_MDIO
+static int dp83867_of_init(struct phy_device *phydev)
+{
+ struct dp83867_private *dp83867 = phydev->priv;
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+ int ret;
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (!of_node)
+ return -ENODEV;
+
+ ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
+ &dp83867->rx_id_delay);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
+ &dp83867->tx_id_delay);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "ti,fifo-depth",
+ &dp83867->fifo_depth);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#else
+static int dp83867_of_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int dp83867_config_init(struct phy_device *phydev)
+{
+ struct dp83867_private *dp83867;
+ int ret;
+ u16 val, delay;
+
+ if (!phydev->priv) {
+ dp83867 = devm_kzalloc(&phydev->dev, sizeof(*dp83867),
+ GFP_KERNEL);
+ if (!dp83867)
+ return -ENOMEM;
+
+ phydev->priv = dp83867;
+ ret = dp83867_of_init(phydev);
+ if (ret)
+ return ret;
+ } else {
+ dp83867 = (struct dp83867_private *)phydev->priv;
+ }
+
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL,
+ (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+ if (ret)
+ return ret;
+ }
+
+ if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
+ (phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID)) {
+ val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
+ DP83867_DEVADDR, phydev->addr);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ val |= DP83867_RGMII_TX_CLK_DELAY_EN;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ val |= DP83867_RGMII_RX_CLK_DELAY_EN;
+
+ phy_write_mmd_indirect(phydev, DP83867_RGMIICTL,
+ DP83867_DEVADDR, phydev->addr, val);
+
+ delay = (dp83867->rx_id_delay |
+ (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
+
+ phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL,
+ DP83867_DEVADDR, phydev->addr, delay);
+ }
+
+ return 0;
+}
+
+static int dp83867_phy_reset(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+ if (err < 0)
+ return err;
+
+ return dp83867_config_init(phydev);
+}
+
+static struct phy_driver dp83867_driver[] = {
+ {
+ .phy_id = DP83867_PHY_ID,
+ .phy_id_mask = 0xfffffff0,
+ .name = "TI DP83867",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+
+ .config_init = dp83867_config_init,
+ .soft_reset = dp83867_phy_reset,
+
+ /* IRQ related */
+ .ack_interrupt = dp83867_ack_interrupt,
+ .config_intr = dp83867_config_intr,
+
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+
+ .driver = {.owner = THIS_MODULE,}
+ },
+};
+module_phy_driver(dp83867_driver);
+
+static struct mdio_device_id __maybe_unused dp83867_tbl[] = {
+ { DP83867_PHY_ID, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, dp83867_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83867 PHY driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL");
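
For the RGMII delay setup above, the two device-tree selector values are packed into one RGMIIDCTL write: RX in the low nibble, TX shifted up by DP83867_RGMII_TX_CLK_DELAY_SHIFT. A sketch of the packing; the selector values below are hypothetical, real ones come from the ti,rx-internal-delay and ti,tx-internal-delay properties:

    #include <stdint.h>
    #include <stdio.h>

    #define RGMII_TX_CLK_DELAY_SHIFT 4

    int main(void)
    {
        uint16_t rx_sel = 0x8, tx_sel = 0xa;  /* hypothetical selectors */
        /* RX selector in bits 3:0, TX selector in bits 7:4. */
        uint16_t delay = rx_sel | (tx_sel << RGMII_TX_CLK_DELAY_SHIFT);

        printf("RGMIIDCTL = 0x%04x\n", (unsigned)delay);  /* 0x00a8 */
        return 0;
    }
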
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 8644f039d922..0dbc445a5fa0 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -139,10 +139,7 @@ static int ip1001_config_init(struct phy_device *phydev)
if (c < 0)
return c;
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ if (phy_interface_is_rgmii(phydev)) {
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
if (c < 0)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 1b1698f98818..f721444c2b0a 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -317,10 +317,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ if (phy_interface_is_rgmii(phydev)) {
mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
MII_88E1121_PHY_MSCR_DELAY_MASK;
@@ -469,10 +466,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
int err;
int temp;
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ if (phy_interface_is_rgmii(phydev)) {
temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
if (temp < 0)
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 414fdf1f343f..fc7abc50b4f1 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -81,7 +81,13 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
return -ETIMEDOUT;
cmd = __raw_readl(priv->base + MDIO_CMD);
- if (cmd & MDIO_READ_FAIL)
+
+ /* Some broken devices are known not to release the line during
+ * turn-around, e.g. Broadcom BCM53125 external switches, so check for
+ * that condition here and ignore the MDIO controller read failure
+ * indication.
+ */
+ if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL))
return -EIO;
return cmd & 0xffff;
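
The unimac change keys off a new per-bus bitmap, phy_ignore_ta_mask, with one bit per MDIO address. A sketch of the test as added above; address 30 is a made-up example, a board would set the bit for whatever address its broken-TA device (such as the BCM53125 named in the comment) answers at:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t phy_ignore_ta_mask = 1u << 30;  /* hypothetical address */
        int phy_id = 30;
        int read_fail = 1;  /* controller flagged a turn-around error */

        /* Only report -EIO if this address is not on the ignore list. */
        if (!(phy_ignore_ta_mask & (1u << phy_id)) && read_fail)
            printf("-EIO\n");
        else
            printf("ignore TA error, keep the read data\n");
        return 0;
    }
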
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index daec9b05d168..61a543c788cc 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -165,8 +165,11 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
ctrl->ops->set_mdio_dir(ctrl, 0);
- /* check the turnaround bit: the PHY should be driving it to zero */
- if (mdiobb_get_bit(ctrl) != 0) {
+ /* Check the turnaround bit: the PHY should be driving it to zero.
+ * If this PHY is listed in phy_ignore_ta_mask as having broken TA,
+ * skip that check.
+ */
+ if (mdiobb_get_bit(ctrl) != 0 &&
+ !(bus->phy_ignore_ta_mask & (1 << phy))) {
/* PHY didn't drive TA low -- flush any bits it
* may be trying to send.
*/
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 53d18150f4e2..7dc21e56a7aa 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -158,6 +158,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
new_bus->name = "GPIO Bitbanged MDIO",
new_bus->phy_mask = pdata->phy_mask;
+ new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask;
new_bus->irq = pdata->irqs;
new_bus->parent = dev;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ebdc357c5131..499185eaf413 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -288,9 +288,10 @@ static int kszphy_config_init(struct phy_device *phydev)
}
static int ksz9021_load_values_from_of(struct phy_device *phydev,
- struct device_node *of_node, u16 reg,
- char *field1, char *field2,
- char *field3, char *field4)
+ const struct device_node *of_node,
+ u16 reg,
+ const char *field1, const char *field2,
+ const char *field3, const char *field4)
{
int val1 = -1;
int val2 = -2;
@@ -336,8 +337,8 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
static int ksz9021_config_init(struct phy_device *phydev)
{
- struct device *dev = &phydev->dev;
- struct device_node *of_node = dev->of_node;
+ const struct device *dev = &phydev->dev;
+ const struct device_node *of_node = dev->of_node;
if (!of_node && dev->parent->of_node)
of_node = dev->parent->of_node;
@@ -365,6 +366,11 @@ static int ksz9021_config_init(struct phy_device *phydev)
#define KSZ9031_PS_TO_REG 60
/* Extended registers */
+/* MMD Address 0x0 */
+#define MII_KSZ9031RN_FLP_BURST_TX_LO 3
+#define MII_KSZ9031RN_FLP_BURST_TX_HI 4
+
+/* MMD Address 0x2 */
#define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
#define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
#define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
@@ -389,9 +395,9 @@ static int ksz9031_extended_read(struct phy_device *phydev,
}
static int ksz9031_of_load_skew_values(struct phy_device *phydev,
- struct device_node *of_node,
+ const struct device_node *of_node,
u16 reg, size_t field_sz,
- char *field[], u8 numfields)
+ const char *field[], u8 numfields)
{
int val[4] = {-1, -2, -3, -4};
int matches = 0;
@@ -425,20 +431,36 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
}
+static int ksz9031_center_flp_timing(struct phy_device *phydev)
+{
+ int result;
+
+ /* Center KSZ9031RNX FLP timing at 16ms. */
+ result = ksz9031_extended_write(phydev, OP_DATA, 0,
+ MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+ if (result)
+ return result;
+
+ result = ksz9031_extended_write(phydev, OP_DATA, 0,
+ MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
+ if (result)
+ return result;
+
+ return genphy_restart_aneg(phydev);
+}
+
static int ksz9031_config_init(struct phy_device *phydev)
{
- struct device *dev = &phydev->dev;
- struct device_node *of_node = dev->of_node;
- char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
- char *rx_data_skews[4] = {
+ const struct device *dev = &phydev->dev;
+ const struct device_node *of_node = dev->of_node;
+ static const char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
+ static const char *rx_data_skews[4] = {
"rxd0-skew-ps", "rxd1-skew-ps",
"rxd2-skew-ps", "rxd3-skew-ps"
};
- char *tx_data_skews[4] = {
+ static const char *tx_data_skews[4] = {
"txd0-skew-ps", "txd1-skew-ps",
"txd2-skew-ps", "txd3-skew-ps"
};
- char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
+ static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
if (!of_node && dev->parent->of_node)
of_node = dev->parent->of_node;
@@ -460,7 +482,8 @@ static int ksz9031_config_init(struct phy_device *phydev)
MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
tx_data_skews, 4);
}
- return 0;
+
+ return ksz9031_center_flp_timing(phydev);
}
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
@@ -519,7 +542,7 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
- struct device_node *np = phydev->dev.of_node;
+ const struct device_node *np = phydev->dev.of_node;
struct kszphy_priv *priv;
struct clk *clk;
int ret;
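
The constants in the ksz9031_center_flp_timing() hunk above check out arithmetically: the HI and LO registers concatenate to 0x00061A80 = 400000, and assuming the FLP burst counter ticks at 40 ns (a 25 MHz clock; the tick period is an assumption here, not stated in the patch), that is exactly the 16 ms the comment promises:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* HI register holds the upper 16 bits, LO the lower 16. */
        uint32_t ticks = (0x0006u << 16) | 0x1A80u;

        printf("ticks  = %lu\n", (unsigned long)ticks);  /* 400000 */
        /* 400000 ticks * 40 ns = 16,000,000 ns = 16 ms. */
        printf("period = %lu us\n", (unsigned long)ticks * 40 / 1000);
        return 0;
    }
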
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 47cd578052fc..b2197b506acb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -58,6 +58,31 @@ static const char *phy_speed_to_str(int speed)
}
}
+#define PHY_STATE_STR(_state) \
+ case PHY_##_state: \
+ return __stringify(_state); \
+
+static const char *phy_state_to_str(enum phy_state st)
+{
+ switch (st) {
+ PHY_STATE_STR(DOWN)
+ PHY_STATE_STR(STARTING)
+ PHY_STATE_STR(READY)
+ PHY_STATE_STR(PENDING)
+ PHY_STATE_STR(UP)
+ PHY_STATE_STR(AN)
+ PHY_STATE_STR(RUNNING)
+ PHY_STATE_STR(NOLINK)
+ PHY_STATE_STR(FORCING)
+ PHY_STATE_STR(CHANGELINK)
+ PHY_STATE_STR(HALTED)
+ PHY_STATE_STR(RESUMING)
+ }
+
+ return NULL;
+}
+
/**
* phy_print_status - Convenience function to print out the current phy status
* @phydev: the phy_device struct
@@ -784,10 +809,13 @@ void phy_state_machine(struct work_struct *work)
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
bool needs_aneg = false, do_suspend = false;
+ enum phy_state old_state;
int err = 0;
mutex_lock(&phydev->lock);
+ old_state = phydev->state;
+
if (phydev->drv->link_change_notify)
phydev->drv->link_change_notify(phydev);
@@ -952,6 +980,9 @@ void phy_state_machine(struct work_struct *work)
if (err < 0)
phy_error(phydev);
+ dev_dbg(&phydev->dev, "PHY state change %s -> %s\n",
+ phy_state_to_str(old_state), phy_state_to_str(phydev->state));
+
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
PHY_STATE_TIME * HZ);
}
@@ -1062,8 +1093,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if ((phydev->duplex == DUPLEX_FULL) &&
((phydev->interface == PHY_INTERFACE_MODE_MII) ||
(phydev->interface == PHY_INTERFACE_MODE_GMII) ||
- (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
- phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
+ phy_interface_is_rgmii(phydev) ||
phy_is_internal(phydev))) {
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
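
The PHY_STATE_STR() helper added to phy.c derives each state name from the enumerator itself: token-pasting builds the case label and __stringify() turns the bare name into the returned string. A stand-alone sketch of the expansion, with __stringify() written out as the usual two-level macro:

    #include <stdio.h>

    #define __stringify_1(x) #x
    #define __stringify(x)   __stringify_1(x)

    enum phy_state { PHY_DOWN, PHY_READY, PHY_RUNNING };

    /* Expands to e.g.: case PHY_RUNNING: return "RUNNING"; */
    #define PHY_STATE_STR(_state) \
        case PHY_##_state:        \
            return __stringify(_state);

    static const char *phy_state_to_str(enum phy_state st)
    {
        switch (st) {
        PHY_STATE_STR(DOWN)
        PHY_STATE_STR(READY)
        PHY_STATE_STR(RUNNING)
        }
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", phy_state_to_str(PHY_RUNNING));  /* RUNNING */
        return 0;
    }
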
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index b62a5e3a1c65..3837ae344f63 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -550,11 +550,11 @@ static struct proto pppoe_sk_proto __read_mostly = {
* Initialize a new struct sock.
*
**********************************************************************/
-static int pppoe_create(struct net *net, struct socket *sock)
+static int pppoe_create(struct net *net, struct socket *sock, int kern)
{
struct sock *sk;
- sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto);
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, kern);
if (!sk)
return -ENOMEM;
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
index 2940e9fe351b..0e1b30622477 100644
--- a/drivers/net/ppp/pppox.c
+++ b/drivers/net/ppp/pppox.c
@@ -118,7 +118,7 @@ static int pppox_create(struct net *net, struct socket *sock, int protocol,
!try_module_get(pppox_protos[protocol]->owner))
goto out;
- rc = pppox_protos[protocol]->create(net, sock);
+ rc = pppox_protos[protocol]->create(net, sock, kern);
module_put(pppox_protos[protocol]->owner);
out:
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index e3bfbd4d0136..14839bc0aaf5 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -561,14 +561,14 @@ static void pptp_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
}
-static int pptp_create(struct net *net, struct socket *sock)
+static int pptp_create(struct net *net, struct socket *sock, int kern)
{
int error = -ENOMEM;
struct sock *sk;
struct pppox_sock *po;
struct pptp_opt *opt;
- sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto);
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
if (!sk)
goto out;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6928448f6b7f..daa054b3ff03 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1924,7 +1924,7 @@ static netdev_features_t team_fix_features(struct net_device *dev,
struct team *team = netdev_priv(dev);
netdev_features_t mask;
- mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
+ mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
@@ -1977,8 +1977,12 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_del_slave = team_del_slave,
.ndo_fix_features = team_fix_features,
.ndo_change_carrier = team_change_carrier,
- .ndo_bridge_setlink = ndo_dflt_netdev_switch_port_bridge_setlink,
- .ndo_bridge_dellink = ndo_dflt_netdev_switch_port_bridge_dellink,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_features_check = passthru_features_check,
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e470ae59d405..1a1c4f7b3ec5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -146,7 +146,6 @@ struct tun_file {
struct socket socket;
struct socket_wq wq;
struct tun_struct __rcu *tun;
- struct net *net;
struct fasync_struct *fasync;
/* only used for fasnyc */
unsigned int flags;
@@ -493,10 +492,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
-
- BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
- &tfile->socket.flags));
- sk_release_kernel(&tfile->sk);
+ sock_put(&tfile->sk);
}
}
@@ -1492,18 +1488,10 @@ out:
return ret;
}
-static int tun_release(struct socket *sock)
-{
- if (sock->sk)
- sock_put(sock->sk);
- return 0;
-}
-
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
.sendmsg = tun_sendmsg,
.recvmsg = tun_recvmsg,
- .release = tun_release,
};
static struct proto tun_proto = {
@@ -1865,7 +1853,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
if (cmd == TUNSETIFF && !tun) {
ifr.ifr_name[IFNAMSIZ-1] = '\0';
- ret = tun_set_iff(tfile->net, file, &ifr);
+ ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
if (ret)
goto unlock;
@@ -2154,16 +2142,16 @@ out:
static int tun_chr_open(struct inode *inode, struct file * file)
{
+ struct net *net = current->nsproxy->net_ns;
struct tun_file *tfile;
DBG1(KERN_INFO, "tunX: tun_chr_open\n");
- tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
- &tun_proto);
+ tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
+ &tun_proto, 0);
if (!tfile)
return -ENOMEM;
RCU_INIT_POINTER(tfile->tun, NULL);
- tfile->net = get_net(current->nsproxy->net_ns);
tfile->flags = 0;
tfile->ifindex = 0;
@@ -2174,13 +2162,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->socket.ops = &tun_socket_ops;
sock_init_data(&tfile->socket, &tfile->sk);
- sk_change_net(&tfile->sk, tfile->net);
tfile->sk.sk_write_space = tun_sock_write_space;
tfile->sk.sk_sndbuf = INT_MAX;
file->private_data = tfile;
- set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
INIT_LIST_HEAD(&tfile->next);
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
@@ -2191,10 +2177,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
static int tun_chr_close(struct inode *inode, struct file *file)
{
struct tun_file *tfile = file->private_data;
- struct net *net = tfile->net;
tun_detach(tfile, true);
- put_net(net);
return 0;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 21a0fbf1ed94..34c519eb1db5 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -336,7 +336,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
nla_put_s32(skb, NDA_LINK_NETNSID,
- peernet2id(dev_net(vxlan->dev), vxlan->net)))
+ peernet2id_alloc(dev_net(vxlan->dev), vxlan->net)))
goto nla_put_failure;
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
@@ -1921,6 +1921,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_oif = rdst->remote_ifindex;
fl4.flowi4_tos = RT_TOS(tos);
+ fl4.flowi4_mark = skb->mark;
+ fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = dst->sin.sin_addr.s_addr;
fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
@@ -1981,6 +1983,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
fl6.flowi6_oif = rdst->remote_ifindex;
fl6.daddr = dst->sin6.sin6_addr;
fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+ fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
@@ -2128,9 +2131,10 @@ static void vxlan_cleanup(unsigned long arg)
if (!netif_running(vxlan->dev))
return;
- spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
+
+ spin_lock_bh(&vxlan->hash_lock);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
@@ -2149,8 +2153,8 @@ static void vxlan_cleanup(unsigned long arg)
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
+ spin_unlock_bh(&vxlan->hash_lock);
}
- spin_unlock_bh(&vxlan->hash_lock);
mod_timer(&vxlan->age_timer, next_timer);
}
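
The vxlan_cleanup() hunk narrows the hash_lock critical section from the whole FDB scan to one bucket at a time, so the aging timer no longer blocks other users of the lock for the full table walk. The shape of that change, with illustrative types rather than the vxlan data structures:

    #include <pthread.h>
    #include <stdio.h>

    #define HASH_SIZE 256

    static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

    static void scan_bucket(int h) { (void)h; /* age out entries */ }

    static void cleanup(void)
    {
        for (int h = 0; h < HASH_SIZE; ++h) {
            /* Lock covers one bucket, not the whole scan. */
            pthread_mutex_lock(&hash_lock);
            scan_bucket(h);
            pthread_mutex_unlock(&hash_lock);
        }
    }

    int main(void)
    {
        cleanup();
        printf("done\n");
        return 0;
    }
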
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index bcfa01add7cc..7193b7304fdd 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -517,7 +517,7 @@ static int cosa_probe(int base, int irq, int dma)
*/
set_current_state(TASK_INTERRUPTIBLE);
cosa_putstatus(cosa, SR_TX_INT_ENA);
- schedule_timeout(30);
+ schedule_timeout(msecs_to_jiffies(300));
irq = probe_irq_off(irqs);
/* Disable all IRQs from the card */
cosa_putstatus(cosa, 0);
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 08223569cebd..7a72407208b1 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -551,7 +551,7 @@ static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
msg, i);
goto done;
}
- schedule_timeout_uninterruptible(10);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
rmb();
} while (++i > 0);
netdev_err(dev, "%s timeout\n", msg);
@@ -596,7 +596,7 @@ static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
(dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
break;
smp_rmb();
- schedule_timeout_uninterruptible(10);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
} while (++i > 0);
return (i >= 0 ) ? i : -EAGAIN;
@@ -1033,7 +1033,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
/* Flush posted writes */
readl(ioaddr + GSTAR);
- schedule_timeout_uninterruptible(10);
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
for (i = 0; i < 16; i++)
pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
@@ -1046,7 +1046,6 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
static int dscc4_open(struct net_device *dev)
{
struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
- struct dscc4_pci_priv *ppriv;
int ret = -EAGAIN;
if ((dscc4_loopback_check(dpriv) < 0))
@@ -1055,8 +1054,6 @@ static int dscc4_open(struct net_device *dev)
if ((ret = hdlc_open(dev)))
goto err;
- ppriv = dpriv->pci_priv;
-
/*
* Due to various bugs, there is no way to reliably reset a
* specific port (manufacturer's dependent special PCI #RST wiring
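
The cosa and dscc4 changes replace raw jiffy counts with msecs_to_jiffies() because a bare number means N/HZ seconds: schedule_timeout(30) was 300 ms on a HZ=100 kernel but only 30 ms at HZ=1000. A simplified round-up version of the conversion makes the dependence visible:

    #include <stdio.h>

    /* Simplified; the kernel's msecs_to_jiffies has more special cases. */
    static unsigned long msecs_to_jiffies(unsigned int ms, unsigned int hz)
    {
        return ((unsigned long)ms * hz + 999) / 1000;  /* round up */
    }

    int main(void)
    {
        printf("300 ms at HZ=100:  %lu jiffies\n", msecs_to_jiffies(300, 100));
        printf("300 ms at HZ=1000: %lu jiffies\n", msecs_to_jiffies(300, 1000));
        return 0;
    }
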
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 16604bdf5197..a63ab2e83105 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -277,6 +277,7 @@ source "drivers/net/wireless/libertas/Kconfig"
source "drivers/net/wireless/orinoco/Kconfig"
source "drivers/net/wireless/p54/Kconfig"
source "drivers/net/wireless/rt2x00/Kconfig"
+source "drivers/net/wireless/mediatek/Kconfig"
source "drivers/net/wireless/rtlwifi/Kconfig"
source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zd1211rw/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0c8891686718..6b9e729dd8ac 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -45,6 +45,8 @@ obj-$(CONFIG_IWLWIFI) += iwlwifi/
obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
+obj-$(CONFIG_WL_MEDIATEK) += mediatek/
+
obj-$(CONFIG_P54_COMMON) += p54/
obj-$(CONFIG_ATH_CARDS) += ath/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f07a61899545..15f057ed41ad 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1098,14 +1098,18 @@ static void adm8211_hw_init(struct ieee80211_hw *dev)
pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline);
switch (cline) {
- case 0x8: reg |= (0x1 << 14);
- break;
- case 0x16: reg |= (0x2 << 14);
- break;
- case 0x32: reg |= (0x3 << 14);
- break;
- default: reg |= (0x0 << 14);
- break;
+ case 0x8:
+ reg |= (0x1 << 14);
+ break;
+ case 0x10:
+ reg |= (0x2 << 14);
+ break;
+ case 0x20:
+ reg |= (0x3 << 14);
+ break;
+ default:
+ reg |= (0x0 << 14);
+ break;
}
}
@@ -1353,12 +1357,7 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
new_flags = 0;
- if (*total_flags & FIF_PROMISC_IN_BSS) {
- new_flags |= FIF_PROMISC_IN_BSS;
- priv->nar |= ADM8211_NAR_PR;
- priv->nar &= ~ADM8211_NAR_MM;
- mc_filter[1] = mc_filter[0] = ~0;
- } else if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
+ if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
new_flags |= FIF_ALLMULTI;
priv->nar &= ~ADM8211_NAR_PR;
priv->nar |= ADM8211_NAR_MM;
@@ -1374,9 +1373,9 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
ADM8211_CSR_READ(NAR);
if (priv->nar & ADM8211_NAR_PR)
- dev->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+ ieee80211_hw_set(dev, RX_INCLUDES_FCS);
else
- dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, dev->flags);
if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
adm8211_set_bssid(dev, bcast);
@@ -1862,8 +1861,8 @@ static int adm8211_probe(struct pci_dev *pdev,
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
- /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */
- dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
+ /* dev->flags = RX_INCLUDES_FCS in promisc mode */
+ ieee80211_hw_set(dev, SIGNAL_UNSPEC);
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
dev->max_signal = 100; /* FIXME: find better value */
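
The adm8211 switch fix corrects decimal values that had been typed as hex. The PCI cache line size register counts 32-bit words, so the values of interest are 8, 16 and 32 words (0x8, 0x10, 0x20, i.e. 32/64/128 bytes); 0x16 and 0x32 correspond to no common cache line size and could never match:

    #include <stdio.h>

    int main(void)
    {
        /* Register value is in DWORDs; multiply by 4 for bytes. */
        unsigned char cline_words[] = { 0x8, 0x10, 0x20 };

        for (unsigned int i = 0; i < sizeof(cline_words); i++)
            printf("0x%02x words = %u bytes\n",
                   cline_words[i], (unsigned int)(cline_words[i] * 4));
        return 0;
    }
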
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 49219c508963..dab25136214a 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2360,8 +2360,8 @@ static int at76_init_new_device(struct at76_priv *priv,
priv->hw->wiphy->max_scan_ie_len = 0;
priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band;
- priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_UNSPEC;
+ ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(priv->hw, SIGNAL_UNSPEC);
priv->hw->max_signal = 100;
SET_IEEE80211_DEV(priv->hw, &interface->dev);
diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h
index 55090a38ac95..ae03271f878e 100644
--- a/drivers/net/wireless/at76c50x-usb.h
+++ b/drivers/net/wireless/at76c50x-usb.h
@@ -447,7 +447,7 @@ struct at76_priv {
int mac80211_registered;
};
-#define AT76_SUPPORTED_FILTERS FIF_PROMISC_IN_BSS
+#define AT76_SUPPORTED_FILTERS 0
#define SCAN_POLL_INTERVAL (HZ / 4)
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 5147ebe4cd05..3b343c63aa52 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1319,8 +1319,7 @@ out_unlock:
}
-#define AR5523_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+#define AR5523_SUPPORTED_FILTERS (FIF_ALLMULTI | \
FIF_FCSFAIL | \
FIF_OTHER_BSS)
@@ -1683,9 +1682,9 @@ static int ar5523_probe(struct usb_interface *intf,
(id->driver_info & AR5523_FLAG_ABG) ? '5' : '2');
ar->vif = NULL;
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_HAS_RATE_CONTROL;
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
hw->extra_tx_headroom = sizeof(struct ar5523_tx_desc) +
sizeof(struct ar5523_chunk);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 7e9481099a8e..65ef483ebf50 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -251,6 +251,7 @@ void ath_printk(const char *level, const struct ath_common *common,
* @ATH_DBG_DFS: radar detection
* @ATH_DBG_WOW: Wake on Wireless
* @ATH_DBG_DYNACK: dynack handling
+ * @ATH_DBG_SPECTRAL_SCAN: FFT spectral scan
* @ATH_DBG_ANY: enable all debugging
*
* The debug level is used to control the amount and type of debugging output
@@ -280,6 +281,7 @@ enum ATH_DEBUG {
ATH_DBG_WOW = 0x00020000,
ATH_DBG_CHAN_CTX = 0x00040000,
ATH_DBG_DYNACK = 0x00080000,
+ ATH_DBG_SPECTRAL_SCAN = 0x00100000,
ATH_DBG_ANY = 0xffffffff
};
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index f4dbb3e93bf8..9729e6941635 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -10,13 +10,15 @@ ath10k_core-y += mac.o \
wmi.o \
wmi-tlv.o \
bmi.o \
- hw.o
+ hw.o \
+ p2p.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
+ath10k_core-$(CONFIG_PM) += wow.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index c0e454bb6a8d..bcccae19325d 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -387,7 +387,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
- if (!skip_otp && result != 0) {
+ if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+ ar->fw_features))
+ && result != 0) {
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
}
@@ -482,31 +484,79 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
return 0;
}
-static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
+static int ath10k_core_fetch_spec_board_file(struct ath10k *ar)
{
- int ret = 0;
+ char filename[100];
- if (ar->hw_params.fw.fw == NULL) {
- ath10k_err(ar, "firmware file not defined\n");
- return -EINVAL;
- }
+ scnprintf(filename, sizeof(filename), "board-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), ar->spec_board_id);
+
+ ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
+ if (IS_ERR(ar->board))
+ return PTR_ERR(ar->board);
+
+ ar->board_data = ar->board->data;
+ ar->board_len = ar->board->size;
+ ar->spec_board_loaded = true;
- if (ar->hw_params.fw.board == NULL) {
- ath10k_err(ar, "board data file not defined");
+ return 0;
+}
+
+static int ath10k_core_fetch_generic_board_file(struct ath10k *ar)
+{
+ if (!ar->hw_params.fw.board) {
+ ath10k_err(ar, "failed to find board file fw entry\n");
return -EINVAL;
}
ar->board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.board);
- if (IS_ERR(ar->board)) {
- ret = PTR_ERR(ar->board);
- ath10k_err(ar, "could not fetch board data (%d)\n", ret);
- goto err;
- }
+ if (IS_ERR(ar->board))
+ return PTR_ERR(ar->board);
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
+ ar->spec_board_loaded = false;
+
+ return 0;
+}
+
+static int ath10k_core_fetch_board_file(struct ath10k *ar)
+{
+ int ret;
+
+ if (strlen(ar->spec_board_id) > 0) {
+ ret = ath10k_core_fetch_spec_board_file(ar);
+ if (ret) {
+ ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n",
+ ret);
+ goto generic;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n",
+ ar->spec_board_id);
+ return 0;
+ }
+
+generic:
+ ret = ath10k_core_fetch_generic_board_file(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch generic board data: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
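+
+The lookup order implemented above, with paths illustrative only:
+
+	/* 1. <fw.dir>/board-<bus>-<spec_board_id>.bin  (bus-specific)
+	 * 2. <fw.dir>/<hw_params.fw.board>             (generic fallback)
+	 * A failed specific lookup logs an info message and falls through.
+	 */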
+
+static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
+{
+ int ret = 0;
+
+ if (ar->hw_params.fw.fw == NULL) {
+ ath10k_err(ar, "firmware file not defined\n");
+ return -EINVAL;
+ }
ar->firmware = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
@@ -675,6 +725,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
ar->wmi.op_version);
break;
+ case ATH10K_FW_IE_HTT_OP_VERSION:
+ if (ie_len != sizeof(u32))
+ break;
+
+ version = (__le32 *)data;
+
+ ar->htt.op_version = le32_to_cpup(version);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
+ ar->htt.op_version);
+ break;
default:
ath10k_warn(ar, "Unknown FW IE: %u\n",
le32_to_cpu(hdr->id));
@@ -695,27 +756,6 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
goto err;
}
- /* now fetch the board file */
- if (ar->hw_params.fw.board == NULL) {
- ath10k_err(ar, "board data file not defined");
- ret = -EINVAL;
- goto err;
- }
-
- ar->board = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(ar->board)) {
- ret = PTR_ERR(ar->board);
- ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
- ar->hw_params.fw.dir, ar->hw_params.fw.board,
- ret);
- goto err;
- }
-
- ar->board_data = ar->board->data;
- ar->board_len = ar->board->size;
-
return 0;
err:
@@ -730,6 +770,19 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
/* calibration file is optional, don't check for any errors */
ath10k_fetch_cal_file(ar);
+ ret = ath10k_core_fetch_board_file(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+ return ret;
+ }
+
+ ar->fw_api = 5;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+ if (ret == 0)
+ goto success;
+
ar->fw_api = 4;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
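
The probe now starts at API 5 and walks down through the numbered firmware layouts; a condensed sketch of the sequence (the loop form is illustrative, the driver open-codes each step):

    for (ar->fw_api = 5; ar->fw_api >= 2; ar->fw_api--) {
            scnprintf(name, sizeof(name), "firmware-%d.bin", ar->fw_api);
            if (ath10k_core_fetch_firmware_api_n(ar, name) == 0)
                    goto success;
    }
    /* last resort: legacy API 1 layout with separate otp/board files */
    ret = ath10k_core_fetch_firmware_api_1(ar);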
@@ -958,6 +1011,8 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_stations = TARGET_NUM_STATIONS;
ar->max_num_vdevs = TARGET_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+ WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -966,12 +1021,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_stations = TARGET_10X_NUM_STATIONS;
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->max_num_peers = TARGET_TLV_NUM_PEERS;
ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
+ ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+ ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
+ ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+ WMI_STAT_PEER;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -979,6 +1039,29 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
return -EINVAL;
}
+ /* Backwards compatibility for firmwares without
+ * ATH10K_FW_IE_HTT_OP_VERSION.
+ */
+ if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+ switch (ar->wmi.op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -1080,9 +1163,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
status = ath10k_wmi_wait_for_service_ready(ar);
- if (status <= 0) {
+ if (status) {
ath10k_warn(ar, "wmi service ready event not received");
- status = -ETIMEDOUT;
goto err_hif_stop;
}
}
@@ -1098,9 +1180,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
}
status = ath10k_wmi_wait_for_unified_ready(ar);
- if (status <= 0) {
+ if (status) {
ath10k_err(ar, "wmi unified ready event not received\n");
- status = -ETIMEDOUT;
goto err_hif_stop;
}
@@ -1151,6 +1232,7 @@ EXPORT_SYMBOL(ath10k_core_start);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
{
int ret;
+ unsigned long time_left;
reinit_completion(&ar->target_suspend);
@@ -1160,9 +1242,9 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
return ret;
}
- ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+ time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
- if (ret == 0) {
+ if (!time_left) {
ath10k_warn(ar, "suspend timed out - target pause event never came\n");
return -ETIMEDOUT;
}
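
wait_for_completion_timeout() returns an unsigned long holding the remaining jiffies, with 0 meaning timeout, so the old signed `ret <= 0` test conflated an impossible negative value with expiry; the corrected idiom used throughout this series:

    unsigned long time_left;

    time_left = wait_for_completion_timeout(&done, 1 * HZ);
    if (!time_left)
            return -ETIMEDOUT;   /* 0 remaining jiffies == timed out */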
@@ -1386,6 +1468,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
init_completion(&ar->target_suspend);
+ init_completion(&ar->wow.wakeup_completed);
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index f65310c3ba5f..70fcdc9c2758 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -35,6 +35,7 @@
#include "../dfs_pattern_detector.h"
#include "spectral.h"
#include "thermal.h"
+#include "wow.h"
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -43,15 +44,16 @@
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
-#define ATH10K_NUM_CHANS 38
+#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
+#define ATH10K_NUM_CHANS 39
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
#define ATH10K_MAX_NUM_MGMT_PENDING 128
-/* number of failed packets */
-#define ATH10K_KICKOUT_THRESHOLD 50
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH10K_KICKOUT_THRESHOLD (20 * 16)
/*
* Use insanely high numbers to make sure that the firmware implementation
@@ -82,6 +84,8 @@ struct ath10k_skb_cb {
dma_addr_t paddr;
u8 eid;
u8 vdev_id;
+ enum ath10k_hw_txrx_mode txmode;
+ bool is_protected;
struct {
u8 tid;
@@ -280,6 +284,15 @@ struct ath10k_sta {
#endif
};
+struct ath10k_chanctx {
+ /* Used to store a copy of chanctx_conf to avoid inconsistencies. Ideally
+ * mac80211 should allow some sort of explicit locking to guarantee
+ * that the publicly available chanctx_conf can be accessed safely at
+ * all times.
+ */
+ struct ieee80211_chanctx_conf conf;
+};
+
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
enum ath10k_beacon_state {
@@ -301,6 +314,7 @@ struct ath10k_vif {
enum ath10k_beacon_state beacon_state;
void *beacon_buf;
dma_addr_t beacon_paddr;
+ unsigned long tx_paused; /* arbitrary values defined by target */
struct ath10k *ar;
struct ieee80211_vif *vif;
@@ -334,13 +348,13 @@ struct ath10k_vif {
} ap;
} u;
- u8 fixed_rate;
- u8 fixed_nss;
- u8 force_sgi;
bool use_cts_prot;
int num_legacy_stations;
int txpower;
struct wmi_wmm_params_all_arg wmm_params;
+ struct work_struct ap_csa_work;
+ struct delayed_work connection_loss_work;
+ struct cfg80211_bitrate_mask bitrate_mask;
};
struct ath10k_vif_iter {
@@ -440,6 +454,20 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
+ /* Some firmware revisions have an incomplete WoWLAN implementation
+ * despite WMI service bit being advertised. This feature flag is used
+ * to distinguish whether WoWLAN is really supported or not.
+ */
+ ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
+
+ /* Don't trust error code from otp.bin */
+ ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+
+ /* Some firmware revisions pad 4th hw address to 4 byte boundary making
+ * it 8 bytes long in Native Wifi Rx decap.
+ */
+ ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -498,6 +526,11 @@ static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
return "unknown";
}
+enum ath10k_tx_pause_reason {
+ ATH10K_TX_PAUSE_Q_FULL,
+ ATH10K_TX_PAUSE_MAX,
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@@ -511,12 +544,15 @@ struct ath10k {
u32 fw_version_minor;
u16 fw_version_release;
u16 fw_version_build;
+ u32 fw_stats_req_mask;
u32 phy_capability;
u32 hw_min_tx_power;
u32 hw_max_tx_power;
u32 ht_cap_info;
u32 vht_cap_info;
u32 num_rf_chains;
+ /* protected by conf_mutex */
+ bool ani_enabled;
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
@@ -565,6 +601,9 @@ struct ath10k {
const struct firmware *cal_file;
+ char spec_board_id[100];
+ bool spec_board_loaded;
+
int fw_api;
enum ath10k_cal_mode cal_mode;
@@ -593,6 +632,7 @@ struct ath10k {
struct cfg80211_chan_def chandef;
unsigned long long free_vdev_map;
+ struct ath10k_vif *monitor_arvif;
bool monitor;
int monitor_vdev_id;
bool monitor_started;
@@ -633,6 +673,7 @@ struct ath10k {
int max_num_peers;
int max_num_stations;
int max_num_vdevs;
+ int max_num_tdls_vdevs;
struct work_struct offchan_tx_work;
struct sk_buff_head offchan_tx_queue;
@@ -655,6 +696,8 @@ struct ath10k {
struct dfs_pattern_detector *dfs_detector;
+ unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
+
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
@@ -686,6 +729,7 @@ struct ath10k {
} stats;
struct ath10k_thermal thermal;
+ struct ath10k_wow wow;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 301081db1ef6..8fa606a9c4dd 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -124,10 +124,14 @@ EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar)
{
- ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+ ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
+ (strlen(ar->spec_board_id) > 0 ? ", " : ""),
+ ar->spec_board_id,
+ (strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded
+ ? " fallback" : ""),
ar->hw->wiphy->fw_version,
ar->fw_api,
ar->htt.target_version_major,
@@ -380,12 +384,12 @@ unlock:
static int ath10k_debug_fw_stats_request(struct ath10k *ar)
{
- unsigned long timeout;
+ unsigned long timeout, time_left;
int ret;
lockdep_assert_held(&ar->conf_mutex);
- timeout = jiffies + msecs_to_jiffies(1*HZ);
+ timeout = jiffies + msecs_to_jiffies(1 * HZ);
ath10k_debug_fw_stats_reset(ar);
@@ -395,18 +399,16 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
reinit_completion(&ar->debug.fw_stats_complete);
- ret = ath10k_wmi_request_stats(ar,
- WMI_STAT_PDEV |
- WMI_STAT_VDEV |
- WMI_STAT_PEER);
+ ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask);
if (ret) {
ath10k_warn(ar, "could not request stats (%d)\n", ret);
return ret;
}
- ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
- 1*HZ);
- if (ret == 0)
+ time_left =
+ wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1 * HZ);
+ if (!time_left)
return -ETIMEDOUT;
spin_lock_bh(&ar->data_lock);
@@ -1708,6 +1710,61 @@ static int ath10k_debug_cal_data_release(struct inode *inode,
return 0;
}
+static ssize_t ath10k_write_ani_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ u8 enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->ani_enabled == enable) {
+ ret = count;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable,
+ enable);
+ if (ret) {
+ ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret);
+ goto exit;
+ }
+ ar->ani_enabled = enable;
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+ char buf[32];
+
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->ani_enabled);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ani_enable = {
+ .read = ath10k_read_ani_enable,
+ .write = ath10k_write_ani_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
@@ -1991,6 +2048,50 @@ static const struct file_operations fops_pktlog_filter = {
.open = simple_open
};
+static ssize_t ath10k_write_quiet_period(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 period;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &period))
+ return -EINVAL;
+
+ if (period < ATH10K_QUIET_PERIOD_MIN) {
+ ath10k_warn(ar, "Quiet period %u cannot be less than 25ms\n",
+ period);
+ return -EINVAL;
+ }
+ mutex_lock(&ar->conf_mutex);
+ ar->thermal.quiet_period = period;
+ ath10k_thermal_set_throttling(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->thermal.quiet_period);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_quiet_period = {
+ .read = ath10k_read_quiet_period,
+ .write = ath10k_write_quiet_period,
+ .open = simple_open
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
@@ -2068,6 +2169,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_cal_data);
+ debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_ani_enable);
+
debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
@@ -2088,6 +2192,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
+ debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_quiet_period);
+
return 0;
}
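
Assuming the usual mac80211 debugfs layout, the two new knobs surface as <debugfs>/ieee80211/phyX/ath10k/ani_enable and .../quiet_period; writing with echo and reading back with cat exercises the handlers above. The exact phy name and mount point vary per system.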
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index a12b8323f9f1..53bd6a19eab6 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -36,6 +36,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_REGULATORY = 0x00000800,
ATH10K_DBG_TESTMODE = 0x00001000,
ATH10K_DBG_WMI_PRINT = 0x00002000,
+ ATH10K_DBG_PCI_PS = 0x00004000,
ATH10K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 2fd9e180272b..85bfa2acb801 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -86,21 +86,6 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
-/* assumes tx_lock is held */
-static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
-{
- struct ath10k *ar = ep->htc->ar;
-
- if (!ep->tx_credit_flow_enabled)
- return false;
- if (ep->tx_credits >= ep->tx_credits_per_max_message)
- return false;
-
- ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
- ep->eid);
- return true;
-}
-
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
@@ -111,13 +96,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
+ hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
-
- if (ath10k_htc_ep_need_credit_update(ep))
- hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
-
spin_unlock_bh(&ep->htc->tx_lock);
}
@@ -414,7 +396,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
switch (__le16_to_cpu(msg->hdr.message_id)) {
- default:
+ case ATH10K_HTC_MSG_READY_ID:
+ case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
/* handle HTC control message */
if (completion_done(&htc->ctl_resp)) {
/*
@@ -438,6 +421,10 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
break;
case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
htc->htc_ops.target_send_suspend_complete(ar);
+ break;
+ default:
+ ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
+ break;
}
goto out;
}
@@ -548,6 +535,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
int i, status = 0;
+ unsigned long time_left;
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_msg *msg;
@@ -555,9 +543,9 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
u16 credit_count;
u16 credit_size;
- status = wait_for_completion_timeout(&htc->ctl_resp,
- ATH10K_HTC_WAIT_TIMEOUT_HZ);
- if (status == 0) {
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
/* Workaround: In some cases the PCI HIF doesn't
* receive interrupt for the control response message
* even if the buffer was completed. It is suspected
@@ -569,10 +557,11 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(htc->ar, i, 1);
- status = wait_for_completion_timeout(&htc->ctl_resp,
- ATH10K_HTC_WAIT_TIMEOUT_HZ);
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
- if (status == 0)
+ if (!time_left)
status = -ETIMEDOUT;
}
@@ -646,6 +635,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct sk_buff *skb;
unsigned int max_msg_size = 0;
int length, status;
+ unsigned long time_left;
bool disable_credit_flow_ctrl = false;
u16 message_id, service_id, flags = 0;
u8 tx_alloc = 0;
@@ -701,10 +691,10 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
}
/* wait for response */
- status = wait_for_completion_timeout(&htc->ctl_resp,
- ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
- if (status == 0) {
- ath10k_err(ar, "Service connect timeout: %d\n", status);
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_err(ar, "Service connect timeout\n");
return -ETIMEDOUT;
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 4f59ab923e48..6da6ef26143a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -22,6 +22,86 @@
#include "core.h"
#include "debug.h"
+static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
+ [HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] =
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
+static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = {
+ [HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+ [HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+ [HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+ [HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+};
+
+static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
+ [HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] =
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+ [HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] =
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+ [HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] =
+ HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+ [HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
@@ -66,6 +146,24 @@ int ath10k_htt_init(struct ath10k *ar)
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
+ switch (ar->htt.op_version) {
+ case ATH10K_FW_HTT_OP_VERSION_10_1:
+ ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_TLV:
+ ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_MAIN:
+ ar->htt.t2h_msg_types = htt_main_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_MAX:
+ case ATH10K_FW_HTT_OP_VERSION_UNSET:
+ WARN_ON(1);
+ return -EINVAL;
+ }
return 0;
}
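
With the table selected here, the RX dispatcher can translate a raw firmware id into the unified enum before switching on it; a minimal sketch (mirroring the htt_rx.c hunk later in this diff):

    if (resp->hdr.msg_type >= htt->t2h_msg_types_max)
            return;                                 /* unknown id, drop */
    type = htt->t2h_msg_types[resp->hdr.msg_type];  /* unified enum */
    switch (type) { ... }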
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 874bf44ff7a2..7e8a0d835663 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -25,7 +25,9 @@
#include <net/mac80211.h>
#include "htc.h"
+#include "hw.h"
#include "rx_desc.h"
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
@@ -271,35 +273,108 @@ enum htt_mgmt_tx_status {
/*=== target -> host messages ===============================================*/
-enum htt_t2h_msg_type {
- HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0,
- HTT_T2H_MSG_TYPE_RX_IND = 0x1,
- HTT_T2H_MSG_TYPE_RX_FLUSH = 0x2,
- HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
- HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
- HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
- HTT_T2H_MSG_TYPE_RX_DELBA = 0x6,
- HTT_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
- HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
- HTT_T2H_MSG_TYPE_STATS_CONF = 0x9,
- HTT_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
- HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
- HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
- HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
- HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
- HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
- HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
- HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
- HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
+enum htt_main_t2h_msg_type {
+ HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_MAIN_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_MAIN_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_MAIN_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_MAIN_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_MAIN_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_MAIN_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_MAIN_T2H_MSG_TYPE_TEST,
+ /* keep this last */
+ HTT_MAIN_T2H_NUM_MSGS
+};
+
+enum htt_10x_t2h_msg_type {
+ HTT_10X_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_10X_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_10X_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_10X_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_10X_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_10X_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_10X_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_10X_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_10X_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_10X_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_10X_T2H_MSG_TYPE_TEST = 0xe,
+ HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
+ HTT_10X_T2H_MSG_TYPE_AGGR_CONF = 0x11,
+ HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x12,
+ HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13,
+ /* keep this last */
+ HTT_10X_T2H_NUM_MSGS
+};
+
+enum htt_tlv_t2h_msg_type {
+ HTT_TLV_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_TLV_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_TLV_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_TLV_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_TLV_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_TLV_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_TLV_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_TLV_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_TLV_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, /* deprecated */
+ HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_TLV_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
/* 0x13 reserved */
- HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+ HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+ HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE = 0x15,
+ HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR = 0x16,
+ HTT_TLV_T2H_MSG_TYPE_TEST,
+ /* keep this last */
+ HTT_TLV_T2H_NUM_MSGS
+};
- /* FIXME: Do not depend on this event id. Numbering of this event id is
- * broken across different firmware revisions and HTT version fails to
- * indicate this.
- */
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_RX_IND,
+ HTT_T2H_MSG_TYPE_RX_FLUSH,
+ HTT_T2H_MSG_TYPE_PEER_MAP,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ HTT_T2H_MSG_TYPE_RX_ADDBA,
+ HTT_T2H_MSG_TYPE_RX_DELBA,
+ HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ HTT_T2H_MSG_TYPE_PKTLOG,
+ HTT_T2H_MSG_TYPE_STATS_CONF,
+ HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ HTT_T2H_MSG_TYPE_SEC_IND,
+ HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ HTT_T2H_MSG_TYPE_RX_PN_IND,
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+ HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+ HTT_T2H_MSG_TYPE_AGGR_CONF,
+ HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
HTT_T2H_MSG_TYPE_TEST,
-
/* keep this last */
HTT_T2H_NUM_MSGS
};
@@ -1222,6 +1297,7 @@ struct htt_tx_done {
u32 msdu_id;
bool discard;
bool no_ack;
+ bool success;
};
struct htt_peer_map_event {
@@ -1248,6 +1324,10 @@ struct ath10k_htt {
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
+ enum ath10k_fw_htt_op_version op_version;
+
+ const enum htt_t2h_msg_type *t2h_msg_types;
+ u32 t2h_msg_types_max;
struct {
/*
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 01a2b384f358..89eb16b30fc4 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -637,58 +637,21 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
return 0;
}
-struct rfc1042_hdr {
- u8 llc_dsap;
- u8 llc_ssap;
- u8 llc_ctrl;
- u8 snap_oui[3];
- __be16 snap_type;
-} __packed;
-
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
__be16 len;
} __packed;
-static const u8 rx_legacy_rate_idx[] = {
- 3, /* 0x00 - 11Mbps */
- 2, /* 0x01 - 5.5Mbps */
- 1, /* 0x02 - 2Mbps */
- 0, /* 0x03 - 1Mbps */
- 3, /* 0x04 - 11Mbps */
- 2, /* 0x05 - 5.5Mbps */
- 1, /* 0x06 - 2Mbps */
- 0, /* 0x07 - 1Mbps */
- 10, /* 0x08 - 48Mbps */
- 8, /* 0x09 - 24Mbps */
- 6, /* 0x0A - 12Mbps */
- 4, /* 0x0B - 6Mbps */
- 11, /* 0x0C - 54Mbps */
- 9, /* 0x0D - 36Mbps */
- 7, /* 0x0E - 18Mbps */
- 5, /* 0x0F - 9Mbps */
-};
-
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
- enum ieee80211_band band;
- u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+ struct ieee80211_supported_band *sband;
+ u8 cck, rate, bw, sgi, mcs, nss;
u8 preamble = 0;
u32 info1, info2, info3;
- /* Band value can't be set as undefined but freq can be 0 - use that to
- * determine whether band is provided.
- *
- * FIXME: Perhaps this can go away if CCK rate reporting is a little
- * reworked?
- */
- if (!status->freq)
- return;
-
- band = status->band;
info1 = __le32_to_cpu(rxd->ppdu_start.info1);
info2 = __le32_to_cpu(rxd->ppdu_start.info2);
info3 = __le32_to_cpu(rxd->ppdu_start.info3);
@@ -697,31 +660,18 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
switch (preamble) {
case HTT_RX_LEGACY:
+ /* To get the legacy rate index the band is required. Since the
+ * band can't be undefined, check that freq is non-zero.
+ */
+ if (!status->freq)
+ return;
+
cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
- rate_idx = 0;
-
- if (rate < 0x08 || rate > 0x0F)
- break;
-
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- if (cck)
- rate &= ~BIT(3);
- rate_idx = rx_legacy_rate_idx[rate];
- break;
- case IEEE80211_BAND_5GHZ:
- rate_idx = rx_legacy_rate_idx[rate];
- /* We are using same rate table registering
- HW - ath10k_rates[]. In case of 5GHz skip
- CCK rates, so -4 here */
- rate_idx -= 4;
- break;
- default:
- break;
- }
+ rate &= ~RX_PPDU_START_RATE_FLAG;
- status->rate_idx = rate_idx;
+ sband = &ar->mac.sbands[status->band];
+ status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
@@ -773,8 +723,87 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
}
}
+static struct ieee80211_channel *
+ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
+{
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
+ u16 peer_id;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!rxd)
+ return NULL;
+
+ if (rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
+ return NULL;
+
+ if (!(rxd->msdu_end.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
+ return NULL;
+
+ peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_PEER_IDX);
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer)
+ return NULL;
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (WARN_ON_ONCE(!arvif))
+ return NULL;
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return NULL;
+
+ return def.chan;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_id == vdev_id &&
+ ath10k_mac_vif_chan(arvif->vif, &def) == 0)
+ return def.chan;
+ }
+
+ return NULL;
+}
+
+static void
+ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def *def = data;
+
+ *def = conf->def;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_any_channel(struct ath10k *ar)
+{
+ struct cfg80211_chan_def def = {};
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_htt_rx_h_any_chan_iter,
+ &def);
+
+ return def.chan;
+}
+
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd,
+ u32 vdev_id)
{
struct ieee80211_channel *ch;
@@ -782,6 +811,12 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
+ if (!ch)
+ ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
+ if (!ch)
+ ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
+ if (!ch)
+ ch = ath10k_htt_rx_h_any_channel(ar);
spin_unlock_bh(&ar->data_lock);
if (!ch)
@@ -819,7 +854,8 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ u32 vdev_id)
{
struct sk_buff *first;
struct htt_rx_desc *rxd;
@@ -851,7 +887,7 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_signal(ar, status, rxd);
- ath10k_htt_rx_h_channel(ar, status);
+ ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
ath10k_htt_rx_h_rates(ar, status, rxd);
}
@@ -929,10 +965,16 @@ static void ath10k_process_rx(struct ath10k *ar,
ieee80211_rx(ar->hw, skb);
}
-static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
+static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
+ struct ieee80211_hdr *hdr)
{
- /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
- return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+ int len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+ ar->fw_features))
+ len = round_up(len, 4);
+
+ return len;
}
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
@@ -1031,7 +1073,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
/* pull decapped header and copy SA & DA */
hdr = (struct ieee80211_hdr *)msdu->data;
- hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
+ hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
ether_addr_copy(da, ieee80211_get_DA(hdr));
ether_addr_copy(sa, ieee80211_get_SA(hdr));
skb_pull(msdu, hdr_len);
@@ -1522,7 +1564,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
break;
}
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
@@ -1569,7 +1611,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
return;
}
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
@@ -1598,6 +1640,7 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
tx_done.no_ack = true;
break;
case HTT_DATA_TX_STATUS_OK:
+ tx_done.success = true;
break;
case HTT_DATA_TX_STATUS_DISCARD:
case HTT_DATA_TX_STATUS_POSTPONE:
@@ -1796,7 +1839,7 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
- ath10k_htt_rx_h_channel(ar, status);
+ ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
ath10k_process_rx(ar, status, msdu);
}
}
@@ -1869,7 +1912,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
- ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
ath10k_htt_rx_h_deliver(ar, &amsdu, status);
@@ -1892,6 +1935,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
+ enum htt_t2h_msg_type type;
/* confirm alignment */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
@@ -1899,7 +1943,16 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
resp->hdr.msg_type);
- switch (resp->hdr.msg_type) {
+
+ if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X, max: 0x%0X\n",
+ resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
+
+ switch (type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF: {
htt->target_version_major = resp->ver_resp.major;
htt->target_version_minor = resp->ver_resp.minor;
@@ -1937,6 +1990,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
switch (status) {
case HTT_MGMT_TX_STATUS_OK:
+ tx_done.success = true;
break;
case HTT_MGMT_TX_STATUS_RETRY:
tx_done.no_ack = true;
@@ -1976,7 +2030,6 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_TEST:
- /* FIX THIS */
break;
case HTT_T2H_MSG_TYPE_STATS_CONF:
trace_ath10k_htt_stats(ar, skb->data, skb->len);
@@ -2018,11 +2071,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
return;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
- /* FIXME: This WMI-TLV event is overlapping with 10.2
- * CHAN_CHANGE - both being 0xF. Neither is being used in
- * practice so no immediate action is necessary. Nevertheless
- * HTT may need an abstraction layer like WMI has one day.
- */
+ break;
+ case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
break;
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index cbd2bc9e6202..a60ef7d1d5fc 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -26,7 +26,7 @@ void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
- ieee80211_wake_queues(htt->ar->hw);
+ ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
@@ -49,7 +49,7 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
htt->num_pending_tx++;
if (htt->num_pending_tx == htt->max_num_pending_tx)
- ieee80211_stop_queues(htt->ar->hw);
+ ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
exit:
spin_unlock_bh(&htt->tx_lock);
@@ -420,9 +420,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
int res;
u8 flags0 = 0;
u16 msdu_id, flags1 = 0;
- dma_addr_t paddr;
- u32 frags_paddr;
- bool use_frags;
+ dma_addr_t paddr = 0;
+ u32 frags_paddr = 0;
res = ath10k_htt_tx_inc_pending(htt);
if (res)
@@ -440,12 +439,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
- /* Since HTT 3.0 there is no separate mgmt tx command. However in case
- * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
- * fragment list host driver specifies directly frame pointer. */
- use_frags = htt->target_version_major < 3 ||
- !ieee80211_is_mgmt(hdr->frame_control);
-
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
&paddr);
if (!skb_cb->htt.txbuf) {
@@ -466,7 +459,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
if (res)
goto err_free_txbuf;
- if (likely(use_frags)) {
+ switch (skb_cb->txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ /* fall through */
+ case ATH10K_HW_TXRX_ETHERNET:
frags = skb_cb->htt.txbuf->frags;
frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
@@ -474,15 +472,17 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
frags[1].paddr = 0;
frags[1].len = 0;
- flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
- HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
frags_paddr = skb_cb->htt.txbuf_paddr;
- } else {
+ break;
+ case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
frags_paddr = skb_cb->paddr;
+ break;
}
/* Normally all commands go through HTC which manages tx credits for
@@ -508,11 +508,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
prefetch_len);
skb_cb->htt.txbuf->htc_hdr.flags = 0;
- if (!ieee80211_has_protected(hdr->frame_control))
+ if (!skb_cb->is_protected)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
- flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
-
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
if (msdu->ip_summed == CHECKSUM_PARTIAL) {
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 460771fcfe9e..89e09cbeac19 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -78,6 +78,9 @@ enum qca6174_chip_id_rev {
/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
#define ATH10K_FW_API4_FILE "firmware-4.bin"
+/* HTT id conflict fix for management frames over HTT */
+#define ATH10K_FW_API5_FILE "firmware-5.bin"
+
#define ATH10K_FW_UTF_FILE "utf.bin"
/* includes also the null byte */
@@ -104,6 +107,11 @@ enum ath10k_fw_ie_type {
* FW API 4 and above.
*/
ATH10K_FW_IE_WMI_OP_VERSION = 5,
+
+ /* HTT "operations" interface version, 32 bit value. Supported from
+ * FW API 5 and above.
+ */
+ ATH10K_FW_IE_HTT_OP_VERSION = 6,
};
enum ath10k_fw_wmi_op_version {
@@ -119,6 +127,20 @@ enum ath10k_fw_wmi_op_version {
ATH10K_FW_WMI_OP_VERSION_MAX,
};
+enum ath10k_fw_htt_op_version {
+ ATH10K_FW_HTT_OP_VERSION_UNSET = 0,
+
+ ATH10K_FW_HTT_OP_VERSION_MAIN = 1,
+
+ /* also used in 10.2 and 10.2.4 branches */
+ ATH10K_FW_HTT_OP_VERSION_10_1 = 2,
+
+ ATH10K_FW_HTT_OP_VERSION_TLV = 3,
+
+ /* keep last */
+ ATH10K_FW_HTT_OP_VERSION_MAX,
+};
+
enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
@@ -180,6 +202,27 @@ struct ath10k_pktlog_hdr {
u8 payload[0];
} __packed;
+enum ath10k_hw_rate_ofdm {
+ ATH10K_HW_RATE_OFDM_48M = 0,
+ ATH10K_HW_RATE_OFDM_24M,
+ ATH10K_HW_RATE_OFDM_12M,
+ ATH10K_HW_RATE_OFDM_6M,
+ ATH10K_HW_RATE_OFDM_54M,
+ ATH10K_HW_RATE_OFDM_36M,
+ ATH10K_HW_RATE_OFDM_18M,
+ ATH10K_HW_RATE_OFDM_9M,
+};
+
+enum ath10k_hw_rate_cck {
+ ATH10K_HW_RATE_CCK_LP_11M = 0,
+ ATH10K_HW_RATE_CCK_LP_5_5M,
+ ATH10K_HW_RATE_CCK_LP_2M,
+ ATH10K_HW_RATE_CCK_LP_1M,
+ ATH10K_HW_RATE_CCK_SP_11M,
+ ATH10K_HW_RATE_CCK_SP_5_5M,
+ ATH10K_HW_RATE_CCK_SP_2M,
+};
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
@@ -223,7 +266,7 @@ struct ath10k_pktlog_hdr {
#define TARGET_10X_NUM_WDS_ENTRIES 32
#define TARGET_10X_DMA_BURST_SIZE 0
#define TARGET_10X_MAC_AGGR_DELIM 0
-#define TARGET_10X_AST_SKID_LIMIT 16
+#define TARGET_10X_AST_SKID_LIMIT 128
#define TARGET_10X_NUM_STATIONS 128
#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
(TARGET_10X_NUM_VDEVS))
@@ -256,13 +299,13 @@ struct ath10k_pktlog_hdr {
#define TARGET_10_2_DMA_BURST_SIZE 1
/* Target specific defines for WMI-TLV firmware */
-#define TARGET_TLV_NUM_VDEVS 3
+#define TARGET_TLV_NUM_VDEVS 4
#define TARGET_TLV_NUM_STATIONS 32
-#define TARGET_TLV_NUM_PEERS ((TARGET_TLV_NUM_STATIONS) + \
- (TARGET_TLV_NUM_VDEVS) + \
- 2)
+#define TARGET_TLV_NUM_PEERS 35
+#define TARGET_TLV_NUM_TDLS_VDEVS 1
#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
+#define TARGET_TLV_NUM_WOW_PATTERNS 22
/* Number of Copy Engines supported */
#define CE_COUNT 8
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 973485bd4121..609ca8619ebd 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -28,7 +28,131 @@
#include "txrx.h"
#include "testmode.h"
#include "wmi.h"
+#include "wmi-tlv.h"
#include "wmi-ops.h"
+#include "wow.h"
+
+/*********/
+/* Rates */
+/*********/
+
+static struct ieee80211_rate ath10k_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
+#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
+
+#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
+ ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+static bool ath10k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
+static u8 ath10k_mac_bitrate_to_rate(int bitrate)
+{
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
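+
+A worked example of the conversion, as a sketch: mac80211 expresses bitrates in 100 kbps units while the firmware rate code uses 500 kbps units plus a CCK flag in bit 7.
+
+	/* 5.5 Mbps CCK: bitrate = 55
+	 * DIV_ROUND_UP(55, 5) = 11, CCK so OR in BIT(7)
+	 * => rate code 0x8b
+	 */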
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
+static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
+static u32
+ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
/**********/
/* Crypto */
@@ -37,7 +161,7 @@
static int ath10k_send_key(struct ath10k_vif *arvif,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd,
- const u8 *macaddr, bool def_idx)
+ const u8 *macaddr, u32 flags)
{
struct ath10k *ar = arvif->ar;
struct wmi_vdev_install_key_arg arg = {
@@ -45,16 +169,12 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
.key_idx = key->keyidx,
.key_len = key->keylen,
.key_data = key->key,
+ .key_flags = flags,
.macaddr = macaddr,
};
lockdep_assert_held(&arvif->ar->conf_mutex);
- if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- arg.key_flags = WMI_KEY_PAIRWISE;
- else
- arg.key_flags = WMI_KEY_GROUP;
-
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
arg.key_cipher = WMI_CIPHER_AES_CCM;
@@ -68,17 +188,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
arg.key_cipher = WMI_CIPHER_WEP;
- /* AP/IBSS mode requires self-key to be groupwise
- * Otherwise pairwise key must be set */
- if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
- arg.key_flags = WMI_KEY_PAIRWISE;
-
- if (def_idx)
- arg.key_flags |= WMI_KEY_TX_USAGE;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- /* this one needs to be done in software */
- return 1;
+ WARN_ON(1);
+ return -EINVAL;
default:
ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
return -EOPNOTSUPP;
@@ -95,21 +208,22 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
static int ath10k_install_key(struct ath10k_vif *arvif,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd,
- const u8 *macaddr, bool def_idx)
+ const u8 *macaddr, u32 flags)
{
struct ath10k *ar = arvif->ar;
int ret;
+ unsigned long time_left;
lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->install_key_done);
- ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx);
+ ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
if (ret)
return ret;
- ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ);
- if (ret == 0)
+ time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
+ if (time_left == 0)
return -ETIMEDOUT;
return 0;
@@ -122,7 +236,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
struct ath10k_peer *peer;
int ret;
int i;
- bool def_idx;
+ u32 flags;
lockdep_assert_held(&ar->conf_mutex);
@@ -136,14 +250,20 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
if (arvif->wep_keys[i] == NULL)
continue;
- /* set TX_USAGE flag for default key id */
- if (arvif->def_wep_key_idx == i)
- def_idx = true;
- else
- def_idx = false;
+
+ flags = 0;
+ flags |= WMI_KEY_PAIRWISE;
+
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
+ addr, flags);
+ if (ret)
+ return ret;
+
+ flags = 0;
+ flags |= WMI_KEY_GROUP;
ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
- addr, def_idx);
+ addr, flags);
if (ret)
return ret;
@@ -152,6 +272,27 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
spin_unlock_bh(&ar->data_lock);
}
+ /* In some cases (notably with static WEP IBSS with multiple keys)
+ * multicast Tx becomes broken. Both pairwise and groupwise keys are
+ * installed already. Using WMI_KEY_TX_USAGE in different combinations
+ * didn't seem to help. Using the def_keyid vdev parameter seems to be
+ * effective so use that.
+ *
+ * FIXME: Revisit. Perhaps this can be done in a less hacky way.
+ */
+ if (arvif->def_wep_key_idx == -1)
+ return 0;
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ arvif->def_wep_key_idx);
+ if (ret) {
+ ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
return 0;
}
@@ -163,6 +304,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
int first_errno = 0;
int ret;
int i;
+ u32 flags = 0;
lockdep_assert_held(&ar->conf_mutex);
@@ -179,7 +321,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
/* key flags are not required to delete the key */
ret = ath10k_install_key(arvif, peer->keys[i],
- DISABLE_KEY, addr, false);
+ DISABLE_KEY, addr, flags);
if (ret && first_errno == 0)
first_errno = ret;
@@ -229,6 +371,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
int first_errno = 0;
int ret;
int i;
+ u32 flags = 0;
lockdep_assert_held(&ar->conf_mutex);
@@ -254,7 +397,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
if (i == ARRAY_SIZE(peer->keys))
break;
/* key flags are not required to delete the key */
- ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false);
+ ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
if (ret && first_errno == 0)
first_errno = ret;
@@ -266,6 +409,39 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
return first_errno;
}
+static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(peer, &ar->peers, list) {
+ if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+ continue;
+
+ if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+ continue;
+
+ if (peer->keys[key->keyidx] == key)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i wep key %i needs update\n",
+ arvif->vdev_id, key->keyidx);
+
+ ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
+ arvif->vdev_id, peer->addr, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/*********************/
/* General utilities */
/*********************/
@@ -364,7 +540,56 @@ static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
}
}
-static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ int *num = data;
+
+ (*num)++;
+}
+
+static int ath10k_mac_num_chanctxs(struct ath10k *ar)
+{
+ int num = 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_num_chanctxs_iter,
+ &num);
+
+ return num;
+}
+
+static void
+ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+ enum wmi_peer_type peer_type)
{
int ret;
@@ -373,7 +598,7 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
if (ar->num_peers >= ar->max_num_peers)
return -ENOBUFS;
- ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
+ ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
if (ret) {
ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
@@ -517,6 +742,38 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)
ar->num_stations = 0;
}
+static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
+ struct ieee80211_sta *sta,
+ enum wmi_tdls_peer_state state)
+{
+ int ret;
+ struct wmi_tdls_peer_update_cmd_arg arg = {};
+ struct wmi_tdls_peer_capab_arg cap = {};
+ struct wmi_channel_arg chan_arg = {};
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arg.vdev_id = vdev_id;
+ arg.peer_state = state;
+ ether_addr_copy(arg.addr, sta->addr);
+
+ cap.peer_max_sp = sta->max_sp;
+ cap.peer_uapsd_queues = sta->uapsd_queues;
+
+ if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
+ !sta->tdls_initiator)
+ cap.is_peer_responder = 1;
+
+ ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
+ arg.addr, vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/************************/
/* Interface management */
/************************/
@@ -561,16 +818,16 @@ static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
- int ret;
+ unsigned long time_left;
lockdep_assert_held(&ar->conf_mutex);
if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
return -ESHUTDOWN;
- ret = wait_for_completion_timeout(&ar->vdev_setup_done,
- ATH10K_VDEV_SETUP_TIMEOUT_HZ);
- if (ret == 0)
+ time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+ if (time_left == 0)
return -ETIMEDOUT;
return 0;
@@ -578,13 +835,21 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
- struct cfg80211_chan_def *chandef = &ar->chandef;
+ struct cfg80211_chan_def *chandef = NULL;
struct ieee80211_channel *channel = chandef->chan;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_get_any_chandef_iter,
+ &chandef);
+ if (WARN_ON_ONCE(!chandef))
+ return -ENOENT;
+
+ channel = chandef->chan;
+
arg.vdev_id = vdev_id;
arg.channel.freq = channel->center_freq;
arg.channel.band_center_freq1 = chandef->center_freq1;
@@ -766,27 +1031,78 @@ static int ath10k_monitor_stop(struct ath10k *ar)
return 0;
}
+static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
+{
+ int num_ctx;
+
+ /* At least one chanctx is required to derive a channel to start the
+ * monitor vdev on.
+ */
+ num_ctx = ath10k_mac_num_chanctxs(ar);
+ if (num_ctx == 0)
+ return false;
+
+ /* If a special monitor interface already exists then don't bother
+ * creating another monitor vdev.
+ */
+ if (ar->monitor_arvif)
+ return false;
+
+ return ar->monitor ||
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+}
+
+static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
+{
+ int num_ctx;
+
+ num_ctx = ath10k_mac_num_chanctxs(ar);
+
+ /* FIXME: Current interface combinations and cfg80211/mac80211 code
+ * shouldn't allow this but make sure to prevent handling the following
+ * case anyway since multi-channel DFS hasn't been tested at all.
+ */
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
+ return false;
+
+ return true;
+}
+
static int ath10k_monitor_recalc(struct ath10k *ar)
{
- bool should_start;
+ bool needed;
+ bool allowed;
+ int ret;
lockdep_assert_held(&ar->conf_mutex);
- should_start = ar->monitor ||
- ar->filter_flags & FIF_PROMISC_IN_BSS ||
- test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ needed = ath10k_mac_monitor_vdev_is_needed(ar);
+ allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac monitor recalc started? %d should? %d\n",
- ar->monitor_started, should_start);
+ "mac monitor recalc started? %d needed? %d allowed? %d\n",
+ ar->monitor_started, needed, allowed);
- if (should_start == ar->monitor_started)
+ if (WARN_ON(needed && !allowed)) {
+ if (ar->monitor_started) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
+
+ ret = ath10k_monitor_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", ret);
+ /* not serious */
+ }
+
+ return -EPERM;
+ }
+
+ if (needed == ar->monitor_started)
return 0;
- if (should_start)
+ if (needed)
return ath10k_monitor_start(ar);
-
- return ath10k_monitor_stop(ar);
+ else
+ return ath10k_monitor_stop(ar);
}
static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
@@ -798,12 +1114,14 @@ static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
vdev_param = ar->wmi.vdev_param->enable_rtscts;
- if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
- rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+ rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
if (arvif->num_legacy_stations > 0)
rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
WMI_RTSCTS_PROFILE);
+ else
+ rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
+ WMI_RTSCTS_PROFILE);
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
rts_cts);
@@ -846,6 +1164,27 @@ static int ath10k_stop_cac(struct ath10k *ar)
return 0;
}
+static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ bool *ret = data;
+
+ if (!*ret && conf->radar_enabled)
+ *ret = true;
+}
+
+static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
+{
+ bool has_radar = false;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_has_radar_iter,
+ &has_radar);
+
+ return has_radar;
+}
+
static void ath10k_recalc_radar_detection(struct ath10k *ar)
{
int ret;
@@ -854,7 +1193,7 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar)
ath10k_stop_cac(ar);
- if (!ar->radar_enabled)
+ if (!ath10k_mac_has_radar_enabled(ar))
return;
if (ar->num_started_vdevs > 0)
@@ -872,10 +1211,44 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar)
}
}
-static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ if (ar->num_started_vdevs != 0) {
+ ar->num_started_vdevs--;
+ ath10k_recalc_radar_detection(ar);
+ }
+
+ return ret;
+}
+
+static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *chandef,
+ bool restart)
{
struct ath10k *ar = arvif->ar;
- struct cfg80211_chan_def *chandef = &ar->chandef;
struct wmi_vdev_start_request_arg arg = {};
int ret = 0;
@@ -939,47 +1312,16 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
return ret;
}
-static int ath10k_vdev_start(struct ath10k_vif *arvif)
-{
- return ath10k_vdev_start_restart(arvif, false);
-}
-
-static int ath10k_vdev_restart(struct ath10k_vif *arvif)
+static int ath10k_vdev_start(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *def)
{
- return ath10k_vdev_start_restart(arvif, true);
+ return ath10k_vdev_start_restart(arvif, def, false);
}
-static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+static int ath10k_vdev_restart(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *def)
{
- struct ath10k *ar = arvif->ar;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- reinit_completion(&ar->vdev_setup_done);
-
- ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
- if (ret) {
- ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- ret = ath10k_vdev_setup_sync(ar);
- if (ret) {
- ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
- arvif->vdev_id, ret);
- return ret;
- }
-
- WARN_ON(ar->num_started_vdevs == 0);
-
- if (ar->num_started_vdevs != 0) {
- ar->num_started_vdevs--;
- ath10k_recalc_radar_detection(ar);
- }
-
- return ret;
+ return ath10k_vdev_start_restart(arvif, def, true);
}
static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
@@ -1056,6 +1398,10 @@ static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
return 0;
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return 0;
+
bcn = ieee80211_beacon_get_template(hw, vif, &offs);
if (!bcn) {
ath10k_warn(ar, "failed to get beacon template from mac80211\n");
@@ -1101,6 +1447,9 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
return 0;
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
prb = ieee80211_proberesp_get(hw, vif);
if (!prb) {
ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
@@ -1119,6 +1468,80 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
return 0;
}
+static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct cfg80211_chan_def def;
+ int ret;
+
+ /* When the vdev is originally started during assign_vif_chanctx() some
+ * information is missing, notably the SSID. Firmware revisions with
+ * beacon offloading require the SSID to be provided during vdev
+ * (re)start to handle hidden SSID properly.
+ *
+ * Vdev restart must be done after the vdev has been both started and
+ * upped. Otherwise some firmware revisions (at least 10.2) fail to
+ * deliver the vdev restart response event, causing timeouts during
+ * vdev syncing in ath10k.
+ *
+ * Note: The vdev down/up and template reinstallation could be skipped
+ * since only wmi-tlv firmware is known to have beacon offload and
+ * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
+ * response delivery. It's probably more robust to keep it as is.
+ */
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ if (WARN_ON(!arvif->is_started))
+ return -EINVAL;
+
+ if (WARN_ON(!arvif->is_up))
+ return -EINVAL;
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return -EINVAL;
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ /* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
+ * the firmware will crash upon vdev up.
+ */
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update presp template: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_restart(arvif, &def);
+ if (ret) {
+ ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info)
{
@@ -1128,9 +1551,11 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
lockdep_assert_held(&arvif->ar->conf_mutex);
if (!info->enable_beacon) {
- ath10k_vdev_stop(arvif);
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
- arvif->is_started = false;
arvif->is_up = false;
spin_lock_bh(&arvif->ar->data_lock);
@@ -1142,10 +1567,6 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
arvif->tx_seq_no = 0x1000;
- ret = ath10k_vdev_start(arvif);
- if (ret)
- return;
-
arvif->aid = 0;
ether_addr_copy(arvif->bssid, info->bssid);
@@ -1154,13 +1575,18 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
if (ret) {
ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
- ath10k_vdev_stop(arvif);
return;
}
- arvif->is_started = true;
arvif->is_up = true;
+ ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
@@ -1175,11 +1601,6 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
lockdep_assert_held(&arvif->ar->conf_mutex);
if (!info->ibss_joined) {
- ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
- if (ret)
- ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n",
- self_peer, arvif->vdev_id, ret);
-
if (is_zero_ether_addr(arvif->bssid))
return;
@@ -1188,13 +1609,6 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
return;
}
- ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
- if (ret) {
- ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n",
- self_peer, arvif->vdev_id, ret);
- return;
- }
-
vdev_param = arvif->ar->wmi.vdev_param->atim_window;
ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
ATH10K_DEFAULT_ATIM);
@@ -1294,7 +1708,14 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
enable_ps = false;
}
- if (enable_ps) {
+ if (!arvif->is_started) {
+ /* mac80211 can update vif powersave state while disconnected.
+ * Firmware doesn't behave nicely and consumes more power than
+ * necessary if PS is disabled on a non-started vdev. Hence
+ * force-enable PS for non-running vdevs.
+ */
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ } else if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
@@ -1361,6 +1782,123 @@ static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
return 0;
}
+static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
+ return;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return;
+
+ if (!vif->csa_active)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ if (!ieee80211_csa_is_complete(vif)) {
+ ieee80211_csa_update_counter(vif);
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+ ret);
+ } else {
+ ieee80211_csa_finish(vif);
+ }
+}
+
+static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ ap_csa_work);
+ struct ath10k *ar = arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_mac_vif_ap_csa_count_down(arvif);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = data;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+ return;
+
+ cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_handle_beacon_iter,
+ skb);
+}
+
+static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u32 *vdev_id = data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+
+ if (arvif->vdev_id != *vdev_id)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_beacon_loss(vif);
+
+ /* Firmware doesn't report beacon loss events repeatedly. If AP probe
+ * (done by mac80211) succeeds but beacons do not resume then it
+ * doesn't make sense to continue operation. Queue connection loss work
+ * which can be cancelled when a beacon is received.
+ */
+ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+ ATH10K_CONNECTION_LOSS_HZ);
+}
+
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_handle_beacon_miss_iter,
+ &vdev_id);
+}
+
+static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ connection_loss_work.work);
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_connection_loss(vif);
+}
+
/**********************/
/* Station management */
/**********************/
@@ -1388,12 +1926,18 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ u32 aid;
lockdep_assert_held(&ar->conf_mutex);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->bss_conf.aid;
+ else
+ aid = sta->aid;
+
ether_addr_copy(arg->addr, sta->addr);
arg->vdev_id = arvif->vdev_id;
- arg->peer_aid = sta->aid;
+ arg->peer_aid = aid;
arg->peer_flags |= WMI_PEER_AUTH;
arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
arg->peer_num_spatial_streams = 1;
@@ -1405,15 +1949,18 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
lockdep_assert_held(&ar->conf_mutex);
- bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
- info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY,
- IEEE80211_PRIVACY_ANY);
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (bss) {
const struct cfg80211_bss_ies *ies;
@@ -1443,19 +1990,29 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
}
static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
+ enum ieee80211_band band;
u32 ratemask;
+ u8 rate;
int i;
lockdep_assert_held(&ar->conf_mutex);
- sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
- ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
rateset->num_rates = 0;
@@ -1464,24 +2021,66 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
if (!(ratemask & 1))
continue;
- rateset->rates[rateset->num_rates] = rates->hw_value;
+ rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
rateset->num_rates++;
}
}
+static bool
+ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int i, n;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ int i, n, max_nss;
u32 stbc;
lockdep_assert_held(&ar->conf_mutex);
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
if (!ht_cap->ht_supported)
return;
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
+ ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
arg->peer_flags |= WMI_PEER_HT;
arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ht_cap->ampdu_factor)) - 1;
@@ -1500,11 +2099,13 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
}
- if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
- arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
- if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
- arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+ }
if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
@@ -1524,9 +2125,12 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
else if (ht_cap->mcs.rx_mask[1])
arg->peer_rate_caps |= WMI_RC_DS_FLAG;
- for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++)
- if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
arg->peer_ht_rates.rates[n++] = i;
+ }
/*
* This is a workaround for HT-enabled STAs which break the spec
@@ -1543,7 +2147,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_ht_rates.rates[i] = i;
} else {
arg->peer_ht_rates.num_rates = n;
- arg->peer_num_spatial_streams = sta->rx_nss;
+ arg->peer_num_spatial_streams = max_nss;
}
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -1619,19 +2223,84 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
return 0;
}
+static u16
+ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0: /* fall through */
+ case 1: /* fall through */
+ case 2: /* fall through */
+ case 3: /* fall through */
+ case 4: /* fall through */
+ case 5: /* fall through */
+ case 6: /* fall through */
+ default:
+ /* see ath10k_mac_can_set_bitrate_mask() */
+ WARN_ON(1);
+ /* fall through */
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
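
ath10k_peer_assoc_h_vht_limit() clamps each 2-bit NSS field of the peer's advertised tx MCS map against the user-configured mask: the intersection of the two bitmasks is reduced with fls() to the highest usable index and re-encoded into the map. A hedged userspace sketch of one NSS field, with fls() replaced by a loop and the map values inlined as assumptions:

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    static int fls_u(unsigned int v)   /* stand-in for the kernel's fls() */
    {
            int n = 0;
            while (v) { n++; v >>= 1; }
            return n;
    }

    static unsigned int max_mcs_mask(unsigned short map, int nss)
    {
            switch ((map >> (2 * nss)) & 0x3) {
            case 0: return BIT(8) - 1;
            case 1: return BIT(9) - 1;
            case 2: return BIT(10) - 1;
            }
            return 0;
    }

    int main(void)
    {
            unsigned short tx_mcs_set = 0xfffe; /* NSS1 advertises MCS 0-9 */
            unsigned short limit = 0x0ff;       /* user mask: MCS 0-7 only */
            int idx = fls_u(max_mcs_mask(tx_mcs_set, 0) & limit) - 1;

            /* idx == 7 here, so the NSS1 field collapses to 0 (MCS 0-7); the
             * driver additionally maps an empty intersection (idx == -1) to
             * 3 (not supported)
             */
            tx_mcs_set &= ~0x3;
            tx_mcs_set |= (idx == 7 ? 0 : idx == 8 ? 1 : 2);
            printf("0x%04x\n", tx_mcs_set); /* prints 0xfffc */
            return 0;
    }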
+
static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+ struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u16 *vht_mcs_mask;
u8 ampdu_factor;
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
if (!vht_cap->vht_supported)
return;
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
arg->peer_flags |= WMI_PEER_VHT;
- if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+ if (def.chan->band == IEEE80211_BAND_2GHZ)
arg->peer_flags |= WMI_PEER_VHT_2G;
arg->peer_vht_caps = vht_cap->cap;
@@ -1657,8 +2326,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
arg->peer_vht_rates.tx_max_rate =
__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
- arg->peer_vht_rates.tx_mcs_set =
- __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+ arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
sta->addr, arg->peer_max_mpdu, arg->peer_flags);
@@ -1697,10 +2366,10 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
}
-static bool ath10k_mac_sta_has_11g_rates(struct ieee80211_sta *sta)
+static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
- /* First 4 rates in ath10k_rates are CCK (11b) rates. */
- return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 4;
+ return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
+ ATH10K_MAC_FIRST_OFDM_RATE_IDX;
}
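
Since the first ATH10K_MAC_FIRST_OFDM_RATE_IDX entries of ath10k_rates are the CCK (11b) rates, shifting the 2 GHz supported-rates bitmap right by that index leaves only the OFDM bits, so a non-zero result means the station is at least 11g capable. A tiny sketch of the check:

    #include <stdio.h>

    #define FIRST_OFDM_RATE_IDX 4 /* rate indices 0-3 are the CCK entries */

    int main(void)
    {
            unsigned int supp_rates_b = 0x00f; /* 11b-only station: CCK bits set */
            unsigned int supp_rates_g = 0xff0; /* station with OFDM rates set */

            printf("%d %d\n",
                   !!(supp_rates_b >> FIRST_OFDM_RATE_IDX),  /* 0: 11b only */
                   !!(supp_rates_g >> FIRST_OFDM_RATE_IDX)); /* 1: 11g capable */
            return 0;
    }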
static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
@@ -1708,21 +2377,35 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
- switch (ar->hw->conf.chandef.chan->band) {
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ switch (band) {
case IEEE80211_BAND_2GHZ:
- if (sta->vht_cap.vht_supported) {
+ if (sta->vht_cap.vht_supported &&
+ !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else
phymode = MODE_11AC_VHT20;
- } else if (sta->ht_cap.ht_supported) {
+ } else if (sta->ht_cap.ht_supported &&
+ !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11NG_HT40;
else
phymode = MODE_11NG_HT20;
- } else if (ath10k_mac_sta_has_11g_rates(sta)) {
+ } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
phymode = MODE_11G;
} else {
phymode = MODE_11B;
@@ -1733,15 +2416,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
/*
* Check VHT first.
*/
- if (sta->vht_cap.vht_supported) {
+ if (sta->vht_cap.vht_supported &&
+ !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AC_VHT80;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
phymode = MODE_11AC_VHT40;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
phymode = MODE_11AC_VHT20;
- } else if (sta->ht_cap.ht_supported) {
- if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ } else if (sta->ht_cap.ht_supported &&
+ !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
phymode = MODE_11NA_HT40;
else
phymode = MODE_11NA_HT20;
@@ -1772,9 +2457,9 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
ath10k_peer_assoc_h_crypto(ar, vif, arg);
- ath10k_peer_assoc_h_rates(ar, sta, arg);
- ath10k_peer_assoc_h_ht(ar, sta, arg);
- ath10k_peer_assoc_h_vht(ar, sta, arg);
+ ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
@@ -1993,6 +2678,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
}
arvif->is_up = false;
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
}
static int ath10k_station_assoc(struct ath10k *ar,
@@ -2013,7 +2700,6 @@ static int ath10k_station_assoc(struct ath10k *ar,
return ret;
}
- peer_arg.peer_reassoc = reassoc;
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -2274,6 +2960,149 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
/* TX handlers */
/***************/
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
+{
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+ ar->tx_paused |= BIT(reason);
+ ieee80211_stop_queues(ar->hw);
+}
+
+static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = data;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ if (arvif->tx_paused)
+ return;
+
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
+{
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+ ar->tx_paused &= ~BIT(reason);
+
+ if (ar->tx_paused)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath10k_mac_tx_unlock_iter,
+ ar);
+}
+
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= BITS_PER_LONG);
+ arvif->tx_paused |= BIT(reason);
+ ieee80211_stop_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= BITS_PER_LONG);
+ arvif->tx_paused &= ~BIT(reason);
+
+ if (ar->tx_paused)
+ return;
+
+ if (arvif->tx_paused)
+ return;
+
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
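
The tx lock helpers keep one bit per pause reason in the device-wide and per-vif bitmaps; queues are woken only when both bitmaps are empty, so overlapping pause/unpause events from firmware compose without any refcounting. A minimal sketch of the same bookkeeping outside the driver:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    static unsigned long dev_paused, vif_paused;

    static int queue_should_run(void)
    {
            /* wake only when no pause reason is outstanding at either level */
            return !dev_paused && !vif_paused;
    }

    int main(void)
    {
            vif_paused |= BIT(1);               /* e.g. a P2P NoA pause */
            dev_paused |= BIT(0);               /* e.g. a device-wide pause */
            vif_paused &= ~BIT(1);              /* vif unpaused... */
            printf("%d\n", queue_should_run()); /* ...still 0: dev bit set */
            dev_paused &= ~BIT(0);
            printf("%d\n", queue_should_run()); /* 1: all reasons cleared */
            return 0;
    }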
+
+static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ switch (pause_id) {
+ case WMI_TLV_TX_PAUSE_ID_MCC:
+ case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+ case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PS:
+ case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+ switch (action) {
+ case WMI_TLV_TX_PAUSE_ACTION_STOP:
+ ath10k_mac_vif_tx_lock(arvif, pause_id);
+ break;
+ case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+ ath10k_mac_vif_tx_unlock(arvif, pause_id);
+ break;
+ default:
+ ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+ action, arvif->vdev_id);
+ break;
+ }
+ break;
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+ case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+ case WMI_TLV_TX_PAUSE_ID_HOST:
+ default:
+ /* FIXME: Some pause_ids aren't vdev specific. Instead they
+ * target peer_id and tid. Implementing these could improve
+ * traffic scheduling fairness across multiple connected
+ * stations in AP/IBSS modes.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac ignoring unsupported tx pause vdev %i id %d\n",
+ arvif->vdev_id, pause_id);
+ break;
+ }
+}
+
+struct ath10k_mac_tx_pause {
+ u32 vdev_id;
+ enum wmi_tlv_tx_pause_id pause_id;
+ enum wmi_tlv_tx_pause_action action;
+};
+
+static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_mac_tx_pause *arg = data;
+
+ ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
+}
+
+void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
+{
+ struct ath10k_mac_tx_pause arg = {
+ .vdev_id = vdev_id,
+ .pause_id = pause_id,
+ .action = action,
+ };
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath10k_mac_handle_tx_pause_iter,
+ &arg);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
{
if (ieee80211_is_mgmt(hdr->frame_control))
@@ -2300,6 +3129,52 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
return 0;
}
+static enum ath10k_hw_txrx_mode
+ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+ const struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+
+ if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
+ return ATH10K_HW_TXRX_RAW;
+
+ if (ieee80211_is_mgmt(fc))
+ return ATH10K_HW_TXRX_MGMT;
+
+ /* Workaround:
+ *
+ * NullFunc frames are mostly used to ping whether a client or AP is
+ * still reachable and responsive. This implies tx status reports must
+ * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
+ * may conclude that the other end disappeared and tear down the BSS
+ * connection, or may never disconnect from the BSS/client (which is
+ * what happens in practice).
+ *
+ * Firmware with HTT older than 3.0 delivers incorrect tx status for
+ * NullFunc frames to the driver. However there's an HTT Mgmt Tx
+ * command which seems to deliver correct tx reports for NullFunc
+ * frames. The downside of using it is that it ignores client powersave
+ * state so it can end up disconnecting sleeping clients in AP mode. It
+ * should fix STA mode though because APs don't sleep.
+ */
+ if (ar->htt.target_version_major < 3 &&
+ (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+ !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+ return ATH10K_HW_TXRX_MGMT;
+
+ /* Workaround:
+ *
+ * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
+ * NativeWifi txmode - it selects AP key instead of peer key. It seems
+ * to work with Ethernet txmode so use it.
+ */
+ if (ieee80211_is_data_present(fc) && sta && sta->tdls)
+ return ATH10K_HW_TXRX_ETHERNET;
+
+ return ATH10K_HW_TXRX_NATIVE_WIFI;
+}
+
/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
* Control in the header.
*/
@@ -2317,16 +3192,42 @@ static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
skb->data, (void *)qos_ctl - (void *)skb->data);
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
- /* Fw/Hw generates a corrupted QoS Control Field for QoS NullFunc
- * frames. Powersave is handled by the fw/hw so QoS NyllFunc frames are
- * used only for CQM purposes (e.g. hostapd station keepalive ping) so
- * it is safe to downgrade to NullFunc.
+ /* Some firmware revisions don't handle sending QoS NullFunc well.
+ * These frames are mainly used for CQM purposes so it doesn't really
+ * matter whether QoS NullFunc or NullFunc are sent.
*/
hdr = (void *)skb->data;
- if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
- hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
- }
+
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
+static void ath10k_tx_h_8023(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct rfc1042_hdr *rfc1042;
+ struct ethhdr *eth;
+ size_t hdrlen;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ __be16 type;
+
+ hdr = (void *)skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ rfc1042 = (void *)skb->data + hdrlen;
+
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ type = rfc1042->snap_type;
+
+ skb_pull(skb, hdrlen + sizeof(*rfc1042));
+ skb_push(skb, sizeof(*eth));
+
+ eth = (void *)skb->data;
+ ether_addr_copy(eth->h_dest, da);
+ ether_addr_copy(eth->h_source, sa);
+ eth->h_proto = type;
}
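
ath10k_tx_h_8023() rewrites a data frame for the Ethernet txmode: it saves DA/SA and the SNAP ethertype, pulls the 802.11 header plus the 8-byte RFC 1042 encapsulation, and pushes a 14-byte Ethernet header in their place. A sketch of the byte accounting (the 26-byte figure assumes a 3-address QoS data header without HT control):

    #include <stdio.h>

    int main(void)
    {
            size_t hdr80211 = 26;  /* QoS data, 3 addresses, no HT control */
            size_t rfc1042 = 8;    /* LLC/SNAP: AA AA 03 + OUI + ethertype */
            size_t eth = 14;       /* dest(6) + src(6) + ethertype(2) */
            size_t payload = 1400;

            /* pull 34 bytes, push 14: the frame shrinks by 20 bytes */
            printf("%zu -> %zu\n", hdr80211 + rfc1042 + payload, eth + payload);
            return 0;
    }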
static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
@@ -2365,45 +3266,51 @@ static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
ar->htt.target_version_minor >= 4);
}
-static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
int ret = 0;
- if (ar->htt.target_version_major >= 3) {
- /* Since HTT 3.0 there is no separate mgmt tx command */
- ret = ath10k_htt_tx(&ar->htt, skb);
- goto exit;
+ spin_lock_bh(&ar->data_lock);
+
+ if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
+ ath10k_warn(ar, "wmi mgmt tx queue is full\n");
+ ret = -ENOSPC;
+ goto unlock;
}
- if (ieee80211_is_mgmt(hdr->frame_control)) {
- if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->fw_features)) {
- if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
- ATH10K_MAX_NUM_MGMT_PENDING) {
- ath10k_warn(ar, "reached WMI management transmit queue limit\n");
- ret = -EBUSY;
- goto exit;
- }
+ __skb_queue_tail(q, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
- skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
- ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
- } else {
- ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- }
- } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->fw_features) &&
- ieee80211_is_nullfunc(hdr->frame_control)) {
- /* FW does not report tx status properly for NullFunc frames
- * unless they are sent through mgmt tx path. mac80211 sends
- * those frames when it detects link/beacon loss and depends
- * on the tx status to be correct. */
- ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
- } else {
- ret = ath10k_htt_tx(&ar->htt, skb);
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ struct ath10k_htt *htt = &ar->htt;
+ int ret = 0;
+
+ switch (cb->txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ case ATH10K_HW_TXRX_ETHERNET:
+ ret = ath10k_htt_tx(htt, skb);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->fw_features))
+ ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+ else if (ar->htt.target_version_major >= 3)
+ ret = ath10k_htt_tx(htt, skb);
+ else
+ ret = ath10k_htt_mgmt_tx(htt, skb);
+ break;
}
-exit:
if (ret) {
ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
ret);
@@ -2433,6 +3340,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
const u8 *peer_addr;
int vdev_id;
int ret;
+ unsigned long time_left;
/* FW requirement: We must create a peer before FW will send out
* an offchannel frame. Otherwise the frame will be stuck and
@@ -2465,7 +3373,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
peer_addr, vdev_id);
if (!peer) {
- ret = ath10k_peer_create(ar, vdev_id, peer_addr);
+ ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+ WMI_PEER_TYPE_DEFAULT);
if (ret)
ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
@@ -2476,11 +3385,11 @@ void ath10k_offchan_tx_work(struct work_struct *work)
ar->offchan_tx_skb = skb;
spin_unlock_bh(&ar->data_lock);
- ath10k_tx_htt(ar, skb);
+ ath10k_mac_tx(ar, skb);
- ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
- 3 * HZ);
- if (ret == 0)
+ time_left =
+ wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
+ if (time_left == 0)
ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
skb);
@@ -2700,21 +3609,38 @@ static void ath10k_tx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 fc = hdr->frame_control;
/* We should disable CCK RATE due to P2P */
if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
ATH10K_SKB_CB(skb)->htt.is_offchan = false;
+ ATH10K_SKB_CB(skb)->htt.freq = 0;
ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
+ ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
+ ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
- /* it makes no sense to process injected frames like that */
- if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
+ switch (ATH10K_SKB_CB(skb)->txmode) {
+ case ATH10K_HW_TXRX_MGMT:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
ath10k_tx_h_nwifi(hw, skb);
ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
ath10k_tx_h_seq_no(vif, skb);
+ break;
+ case ATH10K_HW_TXRX_ETHERNET:
+ ath10k_tx_h_8023(skb);
+ break;
+ case ATH10K_HW_TXRX_RAW:
+ /* FIXME: Packet injection isn't implemented. It should be
+ * doable with firmware 10.2 on qca988x.
+ */
+ WARN_ON_ONCE(1);
+ ieee80211_free_txskb(hw, skb);
+ return;
}
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
@@ -2736,7 +3662,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
}
}
- ath10k_tx_htt(ar, skb);
+ ath10k_mac_tx(ar, skb);
}
/* Must not be called with conf_mutex held as workers can use that also. */
@@ -2761,11 +3687,13 @@ void ath10k_halt(struct ath10k *ar)
clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
ar->filter_flags = 0;
ar->monitor = false;
+ ar->monitor_arvif = NULL;
if (ar->monitor_started)
ath10k_monitor_stop(ar);
ar->monitor_started = false;
+ ar->tx_paused = 0;
ath10k_scan_finish(ar);
ath10k_peer_cleanup_all(ar);
@@ -2859,6 +3787,7 @@ static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
+ u32 burst_enable;
int ret = 0;
/*
@@ -2913,6 +3842,24 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_core_stop;
}
+ if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+ ret = ath10k_wmi_adaptive_qcs(ar, true);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+
+ if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
+ burst_enable = ar->wmi.pdev_param->burst_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable burst: %d\n", ret);
+ goto err_core_stop;
+ }
+ }
+
if (ar->cfg_tx_chainmask)
__ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
ar->cfg_rx_chainmask);
@@ -2934,10 +3881,21 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_core_stop;
}
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->ani_enable, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable ani by default: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+
+ ar->ani_enabled = true;
+
ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
ath10k_spectral_start(ar);
+ ath10k_thermal_set_throttling(ar);
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -2991,42 +3949,15 @@ static int ath10k_config_ps(struct ath10k *ar)
return ret;
}
-static const char *chandef_get_width(enum nl80211_chan_width width)
-{
- switch (width) {
- case NL80211_CHAN_WIDTH_20_NOHT:
- return "20 (noht)";
- case NL80211_CHAN_WIDTH_20:
- return "20";
- case NL80211_CHAN_WIDTH_40:
- return "40";
- case NL80211_CHAN_WIDTH_80:
- return "80";
- case NL80211_CHAN_WIDTH_80P80:
- return "80+80";
- case NL80211_CHAN_WIDTH_160:
- return "160";
- case NL80211_CHAN_WIDTH_5:
- return "5";
- case NL80211_CHAN_WIDTH_10:
- return "10";
- }
- return "?";
-}
-
-static void ath10k_config_chan(struct ath10k *ar)
+static void ath10k_mac_chan_reconfigure(struct ath10k *ar)
{
struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
int ret;
lockdep_assert_held(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
- ar->chandef.chan->center_freq,
- ar->chandef.center_freq1,
- ar->chandef.center_freq2,
- chandef_get_width(ar->chandef.width));
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac chan reconfigure\n");
/* First stop monitor interface. Some FW versions crash if there's a
* lone monitor interface. */
@@ -3060,7 +3991,20 @@ static void ath10k_config_chan(struct ath10k *ar)
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
continue;
- ret = ath10k_vdev_restart(arvif);
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+ ret);
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ continue;
+
+ ret = ath10k_vdev_restart(arvif, &def);
if (ret) {
ath10k_warn(ar, "failed to restart vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3147,26 +4091,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ar->conf_mutex);
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac config channel %dMHz flags 0x%x radar %d\n",
- conf->chandef.chan->center_freq,
- conf->chandef.chan->flags,
- conf->radar_enabled);
-
- spin_lock_bh(&ar->data_lock);
- ar->rx_channel = conf->chandef.chan;
- spin_unlock_bh(&ar->data_lock);
-
- ar->radar_enabled = conf->radar_enabled;
- ath10k_recalc_radar_detection(ar);
-
- if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
- ar->chandef = conf->chandef;
- ath10k_config_chan(ar);
- }
- }
-
if (changed & IEEE80211_CONF_CHANGE_PS)
ath10k_config_ps(ar);
@@ -3208,6 +4132,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
int ret = 0;
u32 value;
int bit;
+ int i;
u32 vdev_param;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
@@ -3220,6 +4145,17 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->vif = vif;
INIT_LIST_HEAD(&arvif->list);
+ INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath10k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
if (ar->free_vdev_map == 0) {
ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
@@ -3262,6 +4198,15 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
break;
}
+ /* Using vdev_id as queue number will make it very easy to do per-vif
+ * tx queue locking. This shouldn't wrap due to interface combinations
+ * but do a modulo for correctness' sake and prevent using offchannel tx
+ * queues for regular vif tx.
+ */
+ vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+
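
With this mapping every vif gets a mac80211 hw queue equal to its vdev_id, which is what lets the tx pause code above stop and wake queues per vif via ieee80211_stop_queue()/ieee80211_wake_queue(). A sketch of the modulo wrap, assuming the then-current IEEE80211_MAX_QUEUES of 16 (so the last queue stays free for offchannel tx):

    #include <stdio.h>

    #define IEEE80211_MAX_QUEUES 16 /* assumed mac80211 limit of the era */

    int main(void)
    {
            int vdev_id;

            /* vdev_ids map 1:1 onto queues 0-14 and only wrap past that */
            for (vdev_id = 0; vdev_id < 17; vdev_id++)
                    printf("vdev %d -> queue %d\n",
                           vdev_id, vdev_id % (IEEE80211_MAX_QUEUES - 1));
            return 0;
    }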
/* Some firmware revisions don't wait for beacon tx completion before
* sending another SWBA event. This could lead to hardware using old
* (freed) beacon data in some cases, e.g. tx credit starvation
@@ -3343,14 +4288,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
}
}
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
+ WMI_PEER_TYPE_DEFAULT);
if (ret) {
- ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n",
+ ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
+ }
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_mac_set_kickout(arvif);
if (ret) {
ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
@@ -3406,11 +4355,21 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err_peer_delete;
}
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ar->monitor_arvif = arvif;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ goto err_peer_delete;
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
err_peer_delete:
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
err_vdev_delete:
@@ -3430,6 +4389,14 @@ err:
return ret;
}
+static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
+{
+ int i;
+
+ for (i = 0; i < BITS_PER_LONG; i++)
+ ath10k_mac_vif_tx_unlock(arvif, i);
+}
+
static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -3437,6 +4404,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret;
+ cancel_work_sync(&arvif->ap_csa_work);
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -3451,11 +4421,12 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ar->free_vdev_map |= 1LL << arvif->vdev_id;
list_del(&arvif->list);
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
vif->addr);
if (ret)
- ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
+ ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
arvif->vdev_id, ret);
kfree(arvif->u.ap.noa_data);
@@ -3472,7 +4443,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
/* Some firmware revisions don't notify host about self-peer removal
* until after associated vdev is deleted.
*/
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
vif->addr);
if (ret)
@@ -3486,6 +4458,17 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_peer_cleanup(ar, arvif->vdev_id);
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ar->monitor_arvif = NULL;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_mac_vif_tx_unlock_all(arvif);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
mutex_unlock(&ar->conf_mutex);
}
@@ -3493,8 +4476,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
* FIXME: Has to be verified.
*/
#define SUPPORTED_FILTERS \
- (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+ (FIF_ALLMULTI | \
FIF_CONTROL | \
FIF_PSPOLL | \
FIF_OTHER_BSS | \
@@ -3615,6 +4597,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (ret)
ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
+
+ vdev_param = ar->wmi.vdev_param->protection_mode;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ info->use_cts_prot ? 1 : 0);
+ if (ret)
+ ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
+ info->use_cts_prot, arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -3791,10 +4780,14 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
* frames with multi-vif APs. This is not required for main firmware
* branch (e.g. 636).
*
- * FIXME: This has been tested only in AP. It remains unknown if this
- * is required for multi-vif STA interfaces on 10.1 */
+ * This is also needed for 636 fw for IBSS-RSN to work more reliably.
+ *
+ * FIXME: It remains unknown if this is required for multi-vif STA
+ * interfaces on 10.1.
+ */
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
return;
if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
@@ -3826,8 +4819,14 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
const u8 *peer_addr;
bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104;
- bool def_idx = false;
int ret = 0;
+ int ret2;
+ u32 flags = 0;
+ u32 flags2;
+
+ /* this one needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ return 1;
if (key->keyidx > WMI_MAX_KEY_INDEX)
return -ENOSPC;
@@ -3843,6 +4842,13 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->hw_key_idx = key->keyidx;
+ if (is_wep) {
+ if (cmd == SET_KEY)
+ arvif->wep_keys[key->keyidx] = key;
+ else
+ arvif->wep_keys[key->keyidx] = NULL;
+ }
+
/* the peer should not disappear midway (unless FW goes awry) since
 * we already hold conf_mutex. We just make sure it's there now. */
spin_lock_bh(&ar->data_lock);
@@ -3862,30 +4868,61 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
}
- if (is_wep) {
- if (cmd == SET_KEY)
- arvif->wep_keys[key->keyidx] = key;
- else
- arvif->wep_keys[key->keyidx] = NULL;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags |= WMI_KEY_PAIRWISE;
+ else
+ flags |= WMI_KEY_GROUP;
+ if (is_wep) {
if (cmd == DISABLE_KEY)
ath10k_clear_vdev_key(arvif, key);
- }
- /* set TX_USAGE flag for all the keys incase of dot1x-WEP. For
- * static WEP, do not set this flag for the keys whose key id
- * is greater than default key id.
- */
- if (arvif->def_wep_key_idx == -1)
- def_idx = true;
+ /* When WEP keys are uploaded it's possible that there are
+ * stations associated already (e.g. when merging) without any
+ * keys. Static WEP needs an explicit per-peer key upload.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ cmd == SET_KEY)
+ ath10k_mac_vif_update_wep_key(arvif, key);
+
+ /* 802.1x never sets the def_wep_key_idx so each set_key()
+ * call changes default tx key.
+ *
+ * Static WEP sets def_wep_key_idx via .set_default_unicast_key
+ * after first set_key().
+ */
+ if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
+ flags |= WMI_KEY_TX_USAGE;
+ }
- ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx);
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
goto exit;
}
+ /* mac80211 sets static WEP keys as groupwise while firmware requires
+ * them to be installed twice as both pairwise and groupwise.
+ */
+ if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
+ flags2 = flags;
+ flags2 &= ~WMI_KEY_GROUP;
+ flags2 |= WMI_KEY_PAIRWISE;
+
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
+ if (ret) {
+ ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret);
+ ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
+ peer_addr, flags);
+ if (ret2)
+ ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret2);
+ goto exit;
+ }
+ }
+
ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
spin_lock_bh(&ar->data_lock);
@@ -3933,6 +4970,7 @@ static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
}
arvif->def_wep_key_idx = keyidx;
+
unlock:
mutex_unlock(&arvif->ar->conf_mutex);
}
@@ -3943,6 +4981,10 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
struct ath10k_vif *arvif;
struct ath10k_sta *arsta;
struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
u32 changed, bw, nss, smps;
int err;
@@ -3951,6 +4993,13 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
arvif = arsta->arvif;
ar = arvif->ar;
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
spin_lock_bh(&ar->data_lock);
changed = arsta->changed;
@@ -3964,6 +5013,10 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
mutex_lock(&ar->conf_mutex);
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+ ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
if (changed & IEEE80211_RC_BW_CHANGED) {
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
sta->addr, bw);
@@ -4011,14 +5064,14 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
mutex_unlock(&ar->conf_mutex);
}
-static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
+static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
{
struct ath10k *ar = arvif->ar;
lockdep_assert_held(&ar->conf_mutex);
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return 0;
if (ar->num_stations >= ar->max_num_stations)
@@ -4029,19 +5082,72 @@ static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
return 0;
}
-static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif)
+static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
{
struct ath10k *ar = arvif->ar;
lockdep_assert_held(&ar->conf_mutex);
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
return;
ar->num_stations--;
}
+struct ath10k_mac_tdls_iter_data {
+ u32 num_tdls_stations;
+ struct ieee80211_vif *curr_vif;
+};
+
+static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_mac_tdls_iter_data *iter_data = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ieee80211_vif *sta_vif = arsta->arvif->vif;
+
+ if (sta->tdls && sta_vif == iter_data->curr_vif)
+ iter_data->num_tdls_stations++;
+}
+
+static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_mac_tdls_iter_data data = {};
+
+ data.curr_vif = vif;
+
+ ieee80211_iterate_stations_atomic(hw,
+ ath10k_mac_tdls_vif_stations_count_iter,
+ &data);
+ return data.num_tdls_stations;
+}
+
+static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int *num_tdls_vifs = data;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
+ (*num_tdls_vifs)++;
+}
+
+static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
+{
+ int num_tdls_vifs = 0;
+
+ ieee80211_iterate_active_interfaces_atomic(hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_tdls_vifs_count_iter,
+ &num_tdls_vifs);
+ return num_tdls_vifs;
+}
+
static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -4072,41 +5178,80 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/*
* New station addition.
*/
+ enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
+ u32 num_tdls_stations;
+ u32 num_tdls_vifs;
+
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
arvif->vdev_id, sta->addr,
ar->num_stations + 1, ar->max_num_stations,
ar->num_peers + 1, ar->max_num_peers);
- ret = ath10k_mac_inc_num_stations(arvif);
+ ret = ath10k_mac_inc_num_stations(arvif, sta);
if (ret) {
ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
ar->max_num_stations);
goto exit;
}
- ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
+ if (sta->tdls)
+ peer_type = WMI_PEER_TYPE_TDLS;
+
+ ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
+ peer_type);
if (ret) {
ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
- ath10k_mac_dec_num_stations(arvif);
+ ath10k_mac_dec_num_stations(arvif, sta);
goto exit;
}
- if (vif->type == NL80211_IFTYPE_STATION) {
- WARN_ON(arvif->is_started);
+ if (!sta->tdls)
+ goto exit;
+
+ num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
+ num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
- ret = ath10k_vdev_start(arvif);
+ if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
+ num_tdls_stations == 0) {
+ ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
+ arvif->vdev_id, ar->max_num_tdls_vdevs);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ if (num_tdls_stations == 0) {
+ /* This is the first TDLS peer on the current vif */
+ enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
+
+ ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ state);
if (ret) {
- ath10k_warn(ar, "failed to start vdev %i: %d\n",
+ ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
arvif->vdev_id, ret);
- WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id,
- sta->addr));
- ath10k_mac_dec_num_stations(arvif);
+ ath10k_peer_delete(ar, arvif->vdev_id,
+ sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
goto exit;
}
+ }
+
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+ WMI_TDLS_PEER_STATE_PEERING);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
- arvif->is_started = true;
+ if (num_tdls_stations != 0)
+ goto exit;
+ ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_DISABLE);
}
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
@@ -4117,23 +5262,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
- if (vif->type == NL80211_IFTYPE_STATION) {
- WARN_ON(!arvif->is_started);
-
- ret = ath10k_vdev_stop(arvif);
- if (ret)
- ath10k_warn(ar, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
-
- arvif->is_started = false;
- }
-
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
sta->addr, arvif->vdev_id, ret);
- ath10k_mac_dec_num_stations(arvif);
+ ath10k_mac_dec_num_stations(arvif, sta);
+
+ if (!sta->tdls)
+ goto exit;
+
+ if (ath10k_mac_tdls_vif_stations_count(hw, vif))
+ goto exit;
+
+ /* This was the last TDLS peer on the current vif */
+ ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_DISABLE);
+ if (ret) {
+ ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+ arvif->vdev_id, ret);
+ }
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -4149,9 +5297,30 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
+ new_state == IEEE80211_STA_AUTHORIZED &&
+ sta->tdls) {
+ /*
+ * TDLS station authorized.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
+ sta->addr);
+
+ ret = ath10k_station_assoc(ar, vif, sta, false);
+ if (ret) {
+ ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto exit;
+ }
+
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+ WMI_TDLS_PEER_STATE_CONNECTED);
+ if (ret)
+ ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
/*
* Disassociation.
*/
@@ -4356,6 +5525,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct wmi_start_scan_arg arg;
int ret = 0;
+ u32 scan_time_msec;
mutex_lock(&ar->conf_mutex);
@@ -4382,7 +5552,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
if (ret)
goto exit;
- duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC);
+ scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
memset(&arg, 0, sizeof(arg));
ath10k_wmi_start_scan_init(ar, &arg);
@@ -4390,11 +5560,12 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
arg.scan_id = ATH10K_SCAN_ID;
arg.n_channels = 1;
arg.channels[0] = chan->center_freq;
- arg.dwell_time_active = duration;
- arg.dwell_time_passive = duration;
- arg.max_scan_time = 2 * duration;
+ arg.dwell_time_active = scan_time_msec;
+ arg.dwell_time_passive = scan_time_msec;
+ arg.max_scan_time = scan_time_msec;
arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg.burst_duration_ms = duration;
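+
+ /* Note: the dwell times above are derived from the wiphy maximum
+ * rather than the requested duration; the requested duration is
+ * conveyed via burst_duration_ms and enforced by the scan timeout
+ * work queued below.
+ */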
ret = ath10k_start_scan(ar, &arg);
if (ret) {
@@ -4417,6 +5588,9 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
goto exit;
}
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
ret = 0;
exit:
mutex_unlock(&ar->conf_mutex);
@@ -4512,70 +5686,6 @@ static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
return 1;
}
-#ifdef CONFIG_PM
-static int ath10k_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan)
-{
- struct ath10k *ar = hw->priv;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);
- if (ret) {
- if (ret == -ETIMEDOUT)
- goto resume;
- ret = 1;
- goto exit;
- }
-
- ret = ath10k_hif_suspend(ar);
- if (ret) {
- ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
- goto resume;
- }
-
- ret = 0;
- goto exit;
-resume:
- ret = ath10k_wmi_pdev_resume_target(ar);
- if (ret)
- ath10k_warn(ar, "failed to resume target: %d\n", ret);
-
- ret = 1;
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static int ath10k_resume(struct ieee80211_hw *hw)
-{
- struct ath10k *ar = hw->priv;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- ret = ath10k_hif_resume(ar);
- if (ret) {
- ath10k_warn(ar, "failed to resume hif: %d\n", ret);
- ret = 1;
- goto exit;
- }
-
- ret = ath10k_wmi_pdev_resume_target(ar);
- if (ret) {
- ath10k_warn(ar, "failed to resume target: %d\n", ret);
- ret = 1;
- goto exit;
- }
-
- ret = 0;
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-#endif
-
static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
@@ -4635,343 +5745,286 @@ exit:
return ret;
}
-/* Helper table for legacy fixed_rate/bitrate_mask */
-static const u8 cck_ofdm_rate[] = {
- /* CCK */
- 3, /* 1Mbps */
- 2, /* 2Mbps */
- 1, /* 5.5Mbps */
- 0, /* 11Mbps */
- /* OFDM */
- 3, /* 6Mbps */
- 7, /* 9Mbps */
- 2, /* 12Mbps */
- 6, /* 18Mbps */
- 1, /* 24Mbps */
- 5, /* 36Mbps */
- 0, /* 48Mbps */
- 4, /* 54Mbps */
-};
-
-/* Check if only one bit set */
-static int ath10k_check_single_mask(u32 mask)
-{
- int bit;
-
- bit = ffs(mask);
- if (!bit)
- return 0;
-
- mask &= ~BIT(bit - 1);
- if (mask)
- return 2;
-
- return 1;
-}
-
static bool
-ath10k_default_bitrate_mask(struct ath10k *ar,
- enum ieee80211_band band,
- const struct cfg80211_bitrate_mask *mask)
+ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
{
- u32 legacy = 0x00ff;
- u8 ht = 0xff, i;
- u16 vht = 0x3ff;
- u16 nrf = ar->num_rf_chains;
+ int num_rates = 0;
+ int i;
- if (ar->cfg_tx_chainmask)
- nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+ num_rates += hweight32(mask->control[band].legacy);
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- legacy = 0x00fff;
- vht = 0;
- break;
- case IEEE80211_BAND_5GHZ:
- break;
- default:
- return false;
- }
-
- if (mask->control[band].legacy != legacy)
- return false;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight8(mask->control[band].ht_mcs[i]);
- for (i = 0; i < nrf; i++)
- if (mask->control[band].ht_mcs[i] != ht)
- return false;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+ num_rates += hweight16(mask->control[band].vht_mcs[i]);
- for (i = 0; i < nrf; i++)
- if (mask->control[band].vht_mcs[i] != vht)
- return false;
-
- return true;
+ return num_rates == 1;
}
static bool
-ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- u8 *fixed_nss)
-{
- int ht_nss = 0, vht_nss = 0, i;
+ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ int i;
- /* check legacy */
- if (ath10k_check_single_mask(mask->control[band].legacy))
+ if (mask->control[band].legacy)
return false;
- /* check HT */
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
- if (mask->control[band].ht_mcs[i] == 0xff)
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
continue;
- else if (mask->control[band].ht_mcs[i] == 0x00)
- break;
-
- return false;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
}
- ht_nss = i;
-
- /* check VHT */
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
- if (mask->control[band].vht_mcs[i] == 0x03ff)
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
continue;
- else if (mask->control[band].vht_mcs[i] == 0x0000)
- break;
-
- return false;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
}
- vht_nss = i;
-
- if (ht_nss > 0 && vht_nss > 0)
+ if (ht_nss_mask != vht_nss_mask)
return false;
- if (ht_nss)
- *fixed_nss = ht_nss;
- else if (vht_nss)
- *fixed_nss = vht_nss;
- else
+ if (ht_nss_mask == 0)
return false;
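+
+ /* Only nss masks of the form BIT(n) - 1 can be expressed as a single
+ * nss value, e.g. 0b0011 maps to nss 2, while a sparse mask such as
+ * 0b0101 is rejected below.
+ */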
- return true;
-}
-
-static bool
-ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- enum wmi_rate_preamble *preamble)
-{
- int legacy = 0, ht = 0, vht = 0, i;
-
- *preamble = WMI_RATE_PREAMBLE_OFDM;
-
- /* check legacy */
- legacy = ath10k_check_single_mask(mask->control[band].legacy);
- if (legacy > 1)
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
return false;
- /* check HT */
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
- ht += ath10k_check_single_mask(mask->control[band].ht_mcs[i]);
- if (ht > 1)
- return false;
-
- /* check VHT */
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
- vht += ath10k_check_single_mask(mask->control[band].vht_mcs[i]);
- if (vht > 1)
- return false;
-
- /* Currently we support only one fixed_rate */
- if ((legacy + ht + vht) != 1)
- return false;
-
- if (ht)
- *preamble = WMI_RATE_PREAMBLE_HT;
- else if (vht)
- *preamble = WMI_RATE_PREAMBLE_VHT;
+ *nss = fls(ht_nss_mask);
return true;
}
-static bool
-ath10k_bitrate_mask_rate(struct ath10k *ar,
- const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- u8 *fixed_rate,
- u8 *fixed_nss)
+static int
+ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u8 *rate, u8 *nss)
{
- u8 rate = 0, pream = 0, nss = 0, i;
- enum wmi_rate_preamble preamble;
-
- /* Check if single rate correct */
- if (!ath10k_bitrate_mask_correct(mask, band, &preamble))
- return false;
-
- pream = preamble;
-
- switch (preamble) {
- case WMI_RATE_PREAMBLE_CCK:
- case WMI_RATE_PREAMBLE_OFDM:
- i = ffs(mask->control[band].legacy) - 1;
-
- if (band == IEEE80211_BAND_2GHZ && i < 4)
- pream = WMI_RATE_PREAMBLE_CCK;
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ int rate_idx;
+ int i;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
- if (band == IEEE80211_BAND_5GHZ)
- i += 4;
+ if (hweight32(mask->control[band].legacy) == 1) {
+ rate_idx = ffs(mask->control[band].legacy) - 1;
- if (i >= ARRAY_SIZE(cck_ofdm_rate))
- return false;
+ hw_rate = sband->bitrates[rate_idx].hw_value;
+ bitrate = sband->bitrates[rate_idx].bitrate;
- rate = cck_ofdm_rate[i];
- break;
- case WMI_RATE_PREAMBLE_HT:
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
- if (mask->control[band].ht_mcs[i])
- break;
-
- if (i == IEEE80211_HT_MCS_MASK_LEN)
- return false;
-
- rate = ffs(mask->control[band].ht_mcs[i]) - 1;
- nss = i;
- break;
- case WMI_RATE_PREAMBLE_VHT:
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
- if (mask->control[band].vht_mcs[i])
- break;
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
- if (i == NL80211_VHT_NSS_MAX)
- return false;
+ *nss = 1;
+ *rate = preamble << 6 |
+ (*nss - 1) << 4 |
+ hw_rate << 0;
- rate = ffs(mask->control[band].vht_mcs[i]) - 1;
- nss = i;
- break;
+ return 0;
}
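+
+ /* Each branch here builds the same 8-bit fixed-rate value later fed
+ * to the fixed_rate vdev param: bits [7:6] preamble, bits [5:4]
+ * nss - 1, bits [3:0] legacy hw rate or HT/VHT MCS index.
+ */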
- *fixed_nss = nss + 1;
- nss <<= 4;
- pream <<= 6;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_HT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].ht_mcs[i]) - 1);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
- pream, nss, rate);
-
- *fixed_rate = pream | nss | rate;
+ return 0;
+ }
+ }
- return true;
-}
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_VHT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].vht_mcs[i]) - 1);
-static bool ath10k_get_fixed_rate_nss(struct ath10k *ar,
- const struct cfg80211_bitrate_mask *mask,
- enum ieee80211_band band,
- u8 *fixed_rate,
- u8 *fixed_nss)
-{
- /* First check full NSS mask, if we can simply limit NSS */
- if (ath10k_bitrate_mask_nss(mask, band, fixed_nss))
- return true;
+ return 0;
+ }
+ }
- /* Next Check single rate is set */
- return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss);
+ return -EINVAL;
}
-static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
- u8 fixed_rate,
- u8 fixed_nss,
- u8 force_sgi)
+static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
+ u8 rate, u8 nss, u8 sgi)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
- int ret = 0;
-
- mutex_lock(&ar->conf_mutex);
-
- if (arvif->fixed_rate == fixed_rate &&
- arvif->fixed_nss == fixed_nss &&
- arvif->force_sgi == force_sgi)
- goto exit;
+ int ret;
- if (fixed_rate == WMI_FIXED_RATE_NONE)
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+ lockdep_assert_held(&ar->conf_mutex);
- if (force_sgi)
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n");
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+ arvif->vdev_id, rate, nss, sgi);
vdev_param = ar->wmi.vdev_param->fixed_rate;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- vdev_param, fixed_rate);
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
if (ret) {
ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
- fixed_rate, ret);
- ret = -EINVAL;
- goto exit;
+ rate, ret);
+ return ret;
}
- arvif->fixed_rate = fixed_rate;
-
vdev_param = ar->wmi.vdev_param->nss;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
- vdev_param, fixed_nss);
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
+ return ret;
+ }
+ vdev_param = ar->wmi.vdev_param->sgi;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
if (ret) {
- ath10k_warn(ar, "failed to set fixed nss param %d: %d\n",
- fixed_nss, ret);
- ret = -EINVAL;
- goto exit;
+ ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
+ return ret;
}
- arvif->fixed_nss = fixed_nss;
+ return 0;
+}
- vdev_param = ar->wmi.vdev_param->sgi;
- ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
- force_sgi);
+static bool
+ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
+ enum ieee80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 vht_mcs;
- if (ret) {
- ath10k_warn(ar, "failed to set sgi param %d: %d\n",
- force_sgi, ret);
- ret = -EINVAL;
- goto exit;
+ /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
+ * to express all VHT MCS rate masks. Effectively only the following
+ * ranges can be used: none, 0-7, 0-8 and 0-9.
+ */
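+ /* Concretely, the switch below accepts only the per-nss masks 0x0000,
+ * 0x00ff (MCS 0-7), 0x01ff (MCS 0-8) and 0x03ff (MCS 0-9).
+ */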
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
+ return false;
+ }
}
- arvif->force_sgi = force_sgi;
+ return true;
+}
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
+static void ath10k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_vif *arvif = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arvif->ar;
+
+ if (arsta->arvif != arvif)
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
}
-static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const struct cfg80211_bitrate_mask *mask)
+static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
struct ath10k *ar = arvif->ar;
- enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
- u8 fixed_rate = WMI_FIXED_RATE_NONE;
- u8 fixed_nss = ar->num_rf_chains;
- u8 force_sgi;
+ enum ieee80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u8 rate;
+ u8 nss;
+ u8 sgi;
+ int single_nss;
+ int ret;
- if (ar->cfg_tx_chainmask)
- fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+ if (ath10k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
- force_sgi = mask->control[band].gi;
- if (force_sgi == NL80211_TXRATE_FORCE_LGI)
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
- if (!ath10k_default_bitrate_mask(ar, band, mask)) {
- if (!ath10k_get_fixed_rate_nss(ar, mask, band,
- &fixed_rate,
- &fixed_nss))
+ if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
+ ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+ &rate, &nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = min(ar->num_rf_chains,
+ max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+ ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath10k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
}
- if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
- ath10k_warn(ar, "failed to force SGI usage for default rate settings\n");
- return -EINVAL;
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi);
+ if (ret) {
+ ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto exit;
}
- return ath10k_set_fixed_rate_param(arvif, fixed_rate,
- fixed_nss, force_sgi);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
}
static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
@@ -5090,6 +6143,286 @@ static int ath10k_ampdu_action(struct ieee80211_hw *hw,
return -EINVAL;
}
+static void
+ath10k_mac_update_rx_channel(struct ath10k *ar)
+{
+ struct cfg80211_chan_def *def = NULL;
+
+ /* Both locks are required because ar->rx_channel is modified. This
+ * allows readers to hold either lock.
+ */
+ lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_held(&ar->data_lock);
+
+ /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
+ * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
+ * ppdu on Rx may reduce performance on low-end systems. It should be
+ * possible to build tables/hashmaps to speed up the lookup (be wary of
+ * CPU data cache line sizes, though), but to keep the initial
+ * implementation simple and less intrusive fall back to the slow lookup
+ * only for multi-channel cases. Single-channel cases keep using the old
+ * channel derivation, so performance should not be affected much.
+ */
+ rcu_read_lock();
+ if (ath10k_mac_num_chanctxs(ar) == 1) {
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_get_any_chandef_iter,
+ &def);
+ ar->rx_channel = def->chan;
+ } else {
+ ar->rx_channel = NULL;
+ }
+ rcu_read_unlock();
+}
+
+static void
+ath10k_mac_chan_ctx_init(struct ath10k *ar,
+ struct ath10k_chanctx *arctx,
+ struct ieee80211_chanctx_conf *conf)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_held(&ar->data_lock);
+
+ memset(arctx, 0, sizeof(*arctx));
+
+ arctx->conf = *conf;
+}
+
+static int
+ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx add freq %hu width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_chan_ctx_init(ar, arctx, ctx);
+ ath10k_mac_update_rx_channel(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+ ath10k_monitor_recalc(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void
+ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx remove freq %hu width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_update_rx_channel(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+ ath10k_monitor_recalc(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx change freq %hu->%hu width %d->%d ptr %p changed %x\n",
+ arctx->conf.def.chan->center_freq,
+ ctx->def.chan->center_freq,
+ arctx->conf.def.width, ctx->def.width,
+ ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ spin_lock_bh(&ar->data_lock);
+ arctx->conf = *ctx;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+
+ /* FIXME: How to configure Rx chains properly? */
+
+ /* No other actions are actually necessary. Firmware maintains channel
+ * definitions per vdev internally and there's no host-side channel
+ * context abstraction to configure, e.g. channel width.
+ */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx assign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ if (WARN_ON(arvif->is_started)) {
+ mutex_unlock(&ar->conf_mutex);
+ return -EBUSY;
+ }
+
+ ret = ath10k_vdev_start(arvif, &arctx->conf.def);
+ if (ret) {
+ ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ arctx->conf.def.chan->center_freq, ret);
+ goto err;
+ }
+
+ arvif->is_started = true;
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_stop;
+ }
+
+ arvif->is_up = true;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_stop:
+ ath10k_vdev_stop(arvif);
+ arvif->is_started = false;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void
+ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx unassign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ WARN_ON(!arvif->is_started);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ WARN_ON(!arvif->is_up);
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ }
+
+ ret = ath10k_vdev_stop(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif;
+ struct ath10k_chanctx *arctx_new, *arctx_old;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < n_vifs; i++) {
+ arvif = ath10k_vif_to_arvif(vifs[i].vif);
+ arctx_new = (void *)vifs[i].new_ctx->drv_priv;
+ arctx_old = (void *)vifs[i].old_ctx->drv_priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d ptr %p->%p\n",
+ arvif->vdev_id,
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].new_ctx->def.width,
+ arctx_old, arctx_new);
+
+ if (mode == CHANCTX_SWMODE_SWAP_CONTEXTS) {
+ ath10k_mac_chan_ctx_init(ar, arctx_new,
+ vifs[i].new_ctx);
+ }
+
+ arctx_new->conf = *vifs[i].new_ctx;
+
+ /* FIXME: ath10k_mac_chan_reconfigure() uses the current, i.e. not
+ * yet updated, chanctx_conf pointer.
+ */
+ arctx_old->conf = *vifs[i].new_ctx;
+ }
+ ath10k_mac_update_rx_channel(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* FIXME: Reconfigure only affected vifs */
+ ath10k_mac_chan_reconfigure(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_tx,
.start = ath10k_start,
@@ -5114,31 +6447,31 @@ static const struct ieee80211_ops ath10k_ops = {
.get_antenna = ath10k_get_antenna,
.reconfig_complete = ath10k_reconfig_complete,
.get_survey = ath10k_get_survey,
- .set_bitrate_mask = ath10k_set_bitrate_mask,
+ .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
.ampdu_action = ath10k_ampdu_action,
.get_et_sset_count = ath10k_debug_get_et_sset_count,
.get_et_stats = ath10k_debug_get_et_stats,
.get_et_strings = ath10k_debug_get_et_strings,
+ .add_chanctx = ath10k_mac_op_add_chanctx,
+ .remove_chanctx = ath10k_mac_op_remove_chanctx,
+ .change_chanctx = ath10k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
#ifdef CONFIG_PM
- .suspend = ath10k_suspend,
- .resume = ath10k_resume,
+ .suspend = ath10k_wow_op_suspend,
+ .resume = ath10k_wow_op_resume,
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
#endif
};
-#define RATETAB_ENT(_rate, _rateid, _flags) { \
- .bitrate = (_rate), \
- .flags = (_flags), \
- .hw_value = (_rateid), \
-}
-
#define CHAN2G(_channel, _freq, _flags) { \
.band = IEEE80211_BAND_2GHZ, \
.hw_value = (_channel), \
@@ -5194,6 +6527,7 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
CHAN5G(132, 5660, 0),
CHAN5G(136, 5680, 0),
CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
CHAN5G(149, 5745, 0),
CHAN5G(153, 5765, 0),
CHAN5G(157, 5785, 0),
@@ -5201,31 +6535,6 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
CHAN5G(165, 5825, 0),
};
-/* Note: Be careful if you re-order these. There is code which depends on this
- * ordering.
- */
-static struct ieee80211_rate ath10k_rates[] = {
- /* CCK */
- RATETAB_ENT(10, 0x82, 0),
- RATETAB_ENT(20, 0x84, 0),
- RATETAB_ENT(55, 0x8b, 0),
- RATETAB_ENT(110, 0x96, 0),
- /* OFDM */
- RATETAB_ENT(60, 0x0c, 0),
- RATETAB_ENT(90, 0x12, 0),
- RATETAB_ENT(120, 0x18, 0),
- RATETAB_ENT(180, 0x24, 0),
- RATETAB_ENT(240, 0x30, 0),
- RATETAB_ENT(360, 0x48, 0),
- RATETAB_ENT(480, 0x60, 0),
- RATETAB_ENT(540, 0x6c, 0),
-};
-
-#define ath10k_a_rates (ath10k_rates + 4)
-#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4)
-#define ath10k_g_rates (ath10k_rates + 0)
-#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
-
struct ath10k *ath10k_mac_create(size_t priv_size)
{
struct ieee80211_hw *hw;
@@ -5299,15 +6608,92 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
},
};
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC),
+ },
+};
+
+/* FIXME: This is not thoroughly tested. These combinations may over- or
+ * underestimate hw/fw capabilities.
+ */
+static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
+ {
+ .limits = ath10k_tlv_if_limit,
+ .num_different_channels = 1,
+ .max_interfaces = 3,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_if_limit_ibss,
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+ },
+};
+
+static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
+ {
+ .limits = ath10k_tlv_if_limit,
+ .num_different_channels = 2,
+ .max_interfaces = 3,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_if_limit_ibss,
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+ },
+};
+
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
u16 mcs_map;
+ u32 val;
int i;
vht_cap.vht_supported = 1;
vht_cap.cap = ar->vht_cap_info;
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+ val = ar->num_rf_chains - 1;
+ val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+ vht_cap.cap |= val;
+ }
+
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ val = ar->num_rf_chains - 1;
+ val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+ vht_cap.cap |= val;
+ }
+
mcs_map = 0;
for (i = 0; i < 8; i++) {
if (i < ar->num_rf_chains)
@@ -5438,6 +6824,10 @@ int ath10k_mac_register(struct ath10k *ar)
ht_cap = ath10k_get_ht_cap(ar);
vht_cap = ath10k_create_vht_cap(ar);
+ BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
+ ARRAY_SIZE(ath10k_5ghz_channels)) !=
+ ATH10K_NUM_CHANS);
+
if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
channels = kmemdup(ath10k_2ghz_channels,
sizeof(ath10k_2ghz_channels),
@@ -5492,24 +6882,31 @@ int ath10k_mac_register(struct ath10k *ar)
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO);
- ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_HAS_RATE_CONTROL |
- IEEE80211_HW_AP_LINK_PS |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_SW_CRYPTO_CONTROL;
+ ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+ ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(ar->hw, AP_LINK_PS);
+ ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
- ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
- ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW;
+ ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
}
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -5517,6 +6914,7 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+ ar->hw->chanctx_data_size = sizeof(struct ath10k_chanctx);
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
@@ -5533,6 +6931,9 @@ int ath10k_mac_register(struct ath10k *ar)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
}
+ if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
+ ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
@@ -5540,20 +6941,46 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ ret = ath10k_wow_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to init wow: %d\n", ret);
+ goto err_free;
+ }
+
/*
* on LL hardware, queues are managed entirely by the FW,
* so we only advertise to mac80211 that we can handle queues
*/
- ar->hw->queues = 4;
+ ar->hw->queues = IEEE80211_MAX_QUEUES;
+
+ /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
+ * something that vdev_ids can't reach so that we don't stop the queue
+ * accidentally.
+ */
+ ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
switch (ar->wmi.op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
- case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->hw->wiphy->iface_combinations = ath10k_if_comb;
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_if_comb);
ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+ ar->hw->wiphy->iface_combinations =
+ ath10k_tlv_qcs_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
+ } else {
+ ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_tlv_if_comb);
+ }
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 68296117d203..b291f063705c 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -23,11 +23,22 @@
#define WEP_KEYID_SHIFT 6
+enum wmi_tlv_tx_pause_id;
+enum wmi_tlv_tx_pause_action;
+
struct ath10k_generic_iter {
struct ath10k *ar;
int ret;
};
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
struct ath10k *ath10k_mac_create(size_t priv_size);
void ath10k_mac_destroy(struct ath10k *ar);
int ath10k_mac_register(struct ath10k *ar);
@@ -45,6 +56,24 @@ void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
void ath10k_drain_tx(struct ath10k *ar);
bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
u8 keyidx);
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def);
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
+void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action);
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate);
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c
new file mode 100644
index 000000000000..c0b6ffaf3ec1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "wmi.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow_oppps = noa->ctwindow_oppps;
+ u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+ bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ u8 noa_descriptors = noa->num_descriptors;
+ int i;
+
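+ /* Resulting buffer layout, as built below:
+ * data[0] WLAN_EID_VENDOR_SPECIFIC (0xdd)
+ * data[1] IE length (len - 2)
+ * data[2..4] WFA OUI
+ * data[5] WFA P2P OUI type
+ * data[6] NoA attribute id
+ * data[7..8] attribute length, little endian
+ * data[9..] index, CTWindow/OppPS and NoA descriptors
+ */
+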
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = noa->index;
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count =
+ __le32_to_cpu(noa->descriptors[i].type_count);
+ noa_attr->desc[i].duration = noa->descriptors[i].duration;
+ noa_attr->desc[i].interval = noa->descriptors[i].interval;
+ noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+
+ if (!noa->num_descriptors &&
+ !(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
+
+static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath10k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath10k_p2p_noa_ie_fill(ie, len, noa);
+ ath10k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath10k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+struct ath10k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct wmi_p2p_noa_info *noa;
+};
+
+static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath10k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_p2p_noa_update_vdev_iter,
+ &arg);
+}
diff --git a/drivers/net/wireless/ath/ath10k/p2p.h b/drivers/net/wireless/ath/ath10k/p2p.h
new file mode 100644
index 000000000000..7be616e2e121
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _P2P_H
+#define _P2P_H
+
+struct ath10k_vif;
+struct wmi_p2p_noa_info;
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa);
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_p2p_noa_info *noa);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 7681237fe298..17a060e8efa2 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -113,7 +113,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
.src_sz_max = 2048,
- .dest_nentries = 32,
+ .dest_nentries = 128,
},
/* CE3: host->target WMI */
@@ -183,7 +183,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
+ .nentries = __cpu_to_le32(64),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
@@ -330,6 +330,205 @@ static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
},
};
+static bool ath10k_pci_is_awake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ RTC_STATE_ADDRESS);
+
+ return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
+}
+
+static void __ath10k_pci_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ lockdep_assert_held(&ar_pci->ps_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ iowrite32(PCIE_SOC_WAKE_V_MASK,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+}
+
+static void __ath10k_pci_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ lockdep_assert_held(&ar_pci->ps_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ iowrite32(PCIE_SOC_WAKE_RESET,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+ ar_pci->ps_awake = false;
+}
+
+static int ath10k_pci_wake_wait(struct ath10k *ar)
+{
+ int tot_delay = 0;
+ int curr_delay = 5;
+
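+ /* Poll the RTC state, backing off from 5 us up to 50 us per
+ * iteration, until the target reports awake or PCIE_WAKE_TIMEOUT is
+ * exceeded.
+ */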
+ while (tot_delay < PCIE_WAKE_TIMEOUT) {
+ if (ath10k_pci_is_awake(ar))
+ return 0;
+
+ udelay(curr_delay);
+ tot_delay += curr_delay;
+
+ if (curr_delay < 50)
+ curr_delay += 5;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int ath10k_pci_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ /* This function can be called very frequently. To avoid excessive
+ * CPU stalls for MMIO reads, use a cached variable to hold the device
+ * state.
+ */
+ if (!ar_pci->ps_awake) {
+ __ath10k_pci_wake(ar);
+
+ ret = ath10k_pci_wake_wait(ar);
+ if (ret == 0)
+ ar_pci->ps_awake = true;
+ }
+
+ if (ret == 0) {
+ ar_pci->ps_wake_refcount++;
+ WARN_ON(ar_pci->ps_wake_refcount == 0);
+ }
+
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+ return ret;
+}
+
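+/* Each successful ath10k_pci_wake() must be paired with an
+ * ath10k_pci_sleep(); the chip is actually put back to sleep from the
+ * ps timer (or ath10k_pci_sleep_sync()) once ps_wake_refcount has
+ * dropped to zero and the grace period has expired.
+ */
+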
+static void ath10k_pci_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ if (WARN_ON(ar_pci->ps_wake_refcount == 0))
+ goto skip;
+
+ ar_pci->ps_wake_refcount--;
+
+ mod_timer(&ar_pci->ps_timer, jiffies +
+ msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
+
+skip:
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_ps_timer(unsigned long ptr)
+{
+ struct ath10k *ar = (void *)ptr;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ if (ar_pci->ps_wake_refcount > 0)
+ goto skip;
+
+ __ath10k_pci_sleep(ar);
+
+skip:
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_sleep_sync(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ del_timer_sync(&ar_pci->ps_timer);
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+ WARN_ON(ar_pci->ps_wake_refcount > 0);
+ __ath10k_pci_sleep(ar);
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
+ value, offset, ret);
+ return;
+ }
+
+ iowrite32(value, ar_pci->mem + offset);
+ ath10k_pci_sleep(ar);
+}
+
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 val;
+ int ret;
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
+ offset, ret);
+ return 0xffffffff;
+ }
+
+ val = ioread32(ar_pci->mem + offset);
+ ath10k_pci_sleep(ar);
+
+ return val;
+}
+
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+}
+
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
+}
+
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
u32 cause;
@@ -793,45 +992,6 @@ static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
-static bool ath10k_pci_is_awake(struct ath10k *ar)
-{
- u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
-
- return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
-}
-
-static int ath10k_pci_wake_wait(struct ath10k *ar)
-{
- int tot_delay = 0;
- int curr_delay = 5;
-
- while (tot_delay < PCIE_WAKE_TIMEOUT) {
- if (ath10k_pci_is_awake(ar))
- return 0;
-
- udelay(curr_delay);
- tot_delay += curr_delay;
-
- if (curr_delay < 50)
- curr_delay += 5;
- }
-
- return -ETIMEDOUT;
-}
-
-static int ath10k_pci_wake(struct ath10k *ar)
-{
- ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
- PCIE_SOC_WAKE_V_MASK);
- return ath10k_pci_wake_wait(ar);
-}
-
-static void ath10k_pci_sleep(struct ath10k *ar)
-{
- ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
- PCIE_SOC_WAKE_RESET);
-}
-
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
@@ -1212,11 +1372,15 @@ static void ath10k_pci_irq_enable(struct ath10k *ar)
static int ath10k_pci_hif_start(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
ath10k_pci_irq_enable(ar);
ath10k_pci_rx_post(ar);
+ pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ ar_pci->link_ctl);
+
return 0;
}
@@ -1329,6 +1493,9 @@ static void ath10k_pci_flush(struct ath10k *ar)
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
/* Most likely the device has HTT Rx ring configured. The only way to
@@ -1347,6 +1514,10 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+ WARN_ON(ar_pci->ps_wake_refcount > 0);
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@@ -1524,12 +1695,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
case QCA6174_HW_1_0_CHIP_ID_REV:
case QCA6174_HW_1_1_CHIP_ID_REV:
+ case QCA6174_HW_2_1_CHIP_ID_REV:
+ case QCA6174_HW_2_2_CHIP_ID_REV:
return 3;
case QCA6174_HW_1_3_CHIP_ID_REV:
return 2;
- case QCA6174_HW_2_1_CHIP_ID_REV:
- case QCA6174_HW_2_2_CHIP_ID_REV:
- return 6;
case QCA6174_HW_3_0_CHIP_ID_REV:
case QCA6174_HW_3_1_CHIP_ID_REV:
case QCA6174_HW_3_2_CHIP_ID_REV:
@@ -1967,15 +2137,15 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
- ret = ath10k_pci_wake(ar);
- if (ret) {
- ath10k_err(ar, "failed to wake up target: %d\n", ret);
- return ret;
- }
+ pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ &ar_pci->link_ctl);
+ pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
/*
* Bring the target up cleanly.
@@ -2023,7 +2193,6 @@ err_ce:
ath10k_pci_ce_deinit(ar);
err_sleep:
- ath10k_pci_sleep(ar);
return ret;
}
@@ -2034,28 +2203,18 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
/* Currently hif_power_up performs effectively a reset and hif_stop
* resets the chip as well so there's no point in resetting here.
*/
-
- ath10k_pci_sleep(ar);
}
#ifdef CONFIG_PM
-#define ATH10K_PCI_PM_CONTROL 0x44
-
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- struct pci_dev *pdev = ar_pci->pdev;
- u32 val;
-
- pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
- if ((val & 0x000000ff) != 0x3) {
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
- (val & 0xffffff00) | 0x03);
- }
+ /* The grace timer can still be counting down and ar_pci->ps_awake can
+ * still be true. It is known that the device may be asleep after
+ * resuming regardless of the SoC powersave state before suspending.
+ * Hence make sure the device is asleep before proceeding.
+ */
+ ath10k_pci_sleep_sync(ar);
return 0;
}
@@ -2066,22 +2225,14 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
struct pci_dev *pdev = ar_pci->pdev;
u32 val;
- pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
- if ((val & 0x000000ff) != 0) {
- pci_restore_state(pdev);
- pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
- val & 0xffffff00);
- /*
- * Suspend/Resume resets the PCI configuration space,
- * so we have to re-disable the RETRY_TIMEOUT register (0x41)
- * to keep PCI Tx retries from interfering with C3 CPU state
- */
- pci_read_config_dword(pdev, 0x40, &val);
-
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- }
+ /* Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+ * from interfering with C3 CPU state. pci_restore_state won't help
+ * here since it only restores the first 64 bytes of the PCI config
+ * header.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
return 0;
}
@@ -2497,7 +2648,6 @@ static int ath10k_pci_claim(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct pci_dev *pdev = ar_pci->pdev;
- u32 lcr_val;
int ret;
pci_set_drvdata(pdev, ar);
@@ -2531,10 +2681,6 @@ static int ath10k_pci_claim(struct ath10k *ar)
pci_set_master(pdev);
- /* Workaround: Disable ASPM */
- pci_read_config_dword(pdev, 0x80, &lcr_val);
- pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
-
/* Arrange for access to Target SoC registers. */
ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
if (!ar_pci->mem) {
@@ -2621,9 +2767,19 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->dev = &pdev->dev;
ar_pci->ar = ar;
+ if (pdev->subsystem_vendor || pdev->subsystem_device)
+ scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
+ "%04x:%04x:%04x:%04x",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
spin_lock_init(&ar_pci->ce_lock);
+ spin_lock_init(&ar_pci->ps_lock);
+
setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
(unsigned long)ar);
+ setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
+ (unsigned long)ar);
ret = ath10k_pci_claim(ar);
if (ret) {
@@ -2631,12 +2787,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_core_destroy;
}
- ret = ath10k_pci_wake(ar);
- if (ret) {
- ath10k_err(ar, "failed to wake up: %d\n", ret);
- goto err_release;
- }
-
ret = ath10k_pci_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
@@ -2678,11 +2828,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
pdev->device, chip_id);
- goto err_sleep;
+ goto err_free_irq;
}
- ath10k_pci_sleep(ar);
-
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
@@ -2702,9 +2850,6 @@ err_free_pipes:
ath10k_pci_free_pipes(ar);
err_sleep:
- ath10k_pci_sleep(ar);
-
-err_release:
ath10k_pci_release(ar);
err_core_destroy:
@@ -2734,6 +2879,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_deinit_irq(ar);
ath10k_pci_ce_deinit(ar);
ath10k_pci_free_pipes(ar);
+ ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
}
@@ -2770,7 +2916,19 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
+
+/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+
+/* QCA6174 2.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+
+/* QCA6174 3.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index bddf54320160..d7696ddc03c4 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -185,6 +185,41 @@ struct ath10k_pci {
/* Map CE id to ce_state */
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
struct timer_list rx_post_retry;
+
+ /* Due to HW quirks it is recommended to disable ASPM during device
+ * bootup. To do that the original PCI-E Link Control register value is
+ * stored before bootup is executed and re-programmed once bootup
+ * completes.
+ */
+ u16 link_ctl;
+
+ /* Protects ps_awake and ps_wake_refcount */
+ spinlock_t ps_lock;
+
+ /* The device has a special powersave-oriented register. When the
+ * device is considered asleep it draws less power and the driver is
+ * forbidden from accessing most MMIO registers. If the host accesses
+ * them without waking the device up first, the device might scribble
+ * over host memory or return 0xdeadbeef readouts.
+ */
+ unsigned long ps_wake_refcount;
+
+ /* Waking up takes some time (up to 2ms in some cases) so it can be bad
+ * for latency. To mitigate this the device isn't immediately allowed
+ * to sleep after all references are dropped - instead there's a grace
+ * period after which the powersave register is updated unless some
+ * activity to/from the device happened in the meantime.
+ *
+ * Also see comments on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC.
+ */
+ */
+ struct timer_list ps_timer;
+
+ /* MMIO registers are used to communicate with the device. Under
+ * intensive traffic, touching the powersave register on every access
+ * would be wasteful overhead and would needlessly stall the CPU. It is
+ * far more efficient to rely on a variable in RAM and update it only
+ * upon powersave register state changes.
+ */
+ bool ps_awake;
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -209,61 +244,25 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
* for this device; but that's not guaranteed.
*/
#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \
- (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \
+ (((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS | \
CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \
0x100000 | ((addr) & 0xfffff))
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
-/* Target exposes its registers for direct access. However before host can
- * access them it needs to make sure the target is awake (ath10k_pci_wake,
- * ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go
- * to sleep unless host tells it to (ath10k_pci_sleep).
- *
- * If host tries to access target registers without waking it up it can
- * scribble over host memory.
- *
- * If target is asleep waking it up may take up to even 2ms.
- */
-
-static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
- u32 value)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- iowrite32(value, ar_pci->mem + offset);
-}
-
-static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- return ioread32(ar_pci->mem + offset);
-}
-
-static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
-{
- return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
-}
-
-static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
-{
- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
-}
-
-static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
-}
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val);
-static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
- iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
-}
+/* QCA6174 is known to have Tx/Rx issues when the SOC_WAKE register is poked
+ * too frequently. To avoid this, put the SoC to sleep only after a very
+ * conservative grace period. Adjust with great care.
+ */
+#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60
#endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index e9cc7787bf5f..492b5a5af434 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -661,6 +661,28 @@ struct rx_msdu_end {
#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
#define RX_PPDU_START_INFO5_SERVICE_LSB 0
+/* No idea what this flag means. It seems to always be set in the rate field. */
+#define RX_PPDU_START_RATE_FLAG BIT(3)
+
+enum rx_ppdu_start_rate {
+ RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
+ RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
+ RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
+ RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
+ RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
+ RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
+ RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
+ RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
+
+ RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
+ RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
+ RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
+ RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
+ RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
+ RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
+ RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
+};
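
Since RX_PPDU_START_RATE_FLAG is OR'd into every enum entry, recovering the plain HW rate code is a single mask-out. A self-contained sketch; the sample rate code is an assumed value, not one lifted from hw.h.

#include <stdint.h>
#include <stdio.h>

#define RATE_FLAG (1u << 3)	/* mirrors RX_PPDU_START_RATE_FLAG */

int main(void)
{
	uint32_t ppdu_rate = RATE_FLAG | 0x4;	/* assumed sample rate code */
	uint32_t hw_rate = ppdu_rate & ~RATE_FLAG;	/* strip the flag back out */

	printf("ppdu 0x%x -> hw rate 0x%x\n", ppdu_rate, hw_rate);
	return 0;
}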
+
struct rx_ppdu_start {
struct {
u8 pri20_mhz;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index d22addf6118b..8dcd424aa502 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -519,9 +519,12 @@ int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
int ath10k_spectral_create(struct ath10k *ar)
{
+ /* The buffer size covers whole channels in dual bands up to 128 bins.
+ * Scans with more than 128 bins need to be run on a single band at a
+ * time.
+ */
ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
ar->debug.debugfs_phy,
- 1024, 256,
+ 1140, 2500,
&rfs_spec_scan_cb, NULL);
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index aede750809fe..1a899d70dc5d 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -23,102 +23,50 @@
#include "debug.h"
#include "wmi-ops.h"
-static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
- enum wmi_vdev_type type)
+static int
+ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
{
- struct ath10k_vif *arvif;
- int count = 0;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- list_for_each_entry(arvif, &ar->arvifs, list) {
- if (!arvif->is_started)
- continue;
-
- if (!arvif->is_up)
- continue;
-
- if (arvif->vdev_type != type)
- continue;
-
- count++;
- }
- return count;
-}
-
-static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
- unsigned long *state)
-{
- *state = ATH10K_QUIET_DUTY_CYCLE_MAX;
+ *state = ATH10K_THERMAL_THROTTLE_MAX;
return 0;
}
-static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
- unsigned long *state)
+static int
+ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
{
struct ath10k *ar = cdev->devdata;
mutex_lock(&ar->conf_mutex);
- *state = ar->thermal.duty_cycle;
+ *state = ar->thermal.throttle_state;
mutex_unlock(&ar->conf_mutex);
return 0;
}
-static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
- unsigned long duty_cycle)
+static int
+ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long throttle_state)
{
struct ath10k *ar = cdev->devdata;
- u32 period, duration, enabled;
- int num_bss, ret = 0;
- mutex_lock(&ar->conf_mutex);
- if (ar->state != ATH10K_STATE_ON) {
- ret = -ENETDOWN;
- goto out;
- }
-
- if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
- ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
- duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
- ret = -EINVAL;
- goto out;
- }
- /* TODO: Right now, thermal mitigation is handled only for single/multi
- * vif AP mode. Since quiet param is not validated in STA mode, it needs
- * to be investigated further to handle multi STA and multi-vif (AP+STA)
- * mode properly.
- */
- num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
- if (!num_bss) {
- ath10k_warn(ar, "no active AP interfaces\n");
- ret = -ENETDOWN;
- goto out;
- }
- period = max(ATH10K_QUIET_PERIOD_MIN,
- (ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
- duration = (period * duty_cycle) / 100;
- enabled = duration ? 1 : 0;
-
- ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
- ATH10K_QUIET_START_OFFSET,
- enabled);
- if (ret) {
- ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
- period, duration, enabled, ret);
- goto out;
+ if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) {
+ ath10k_warn(ar, "throttle state %ld is exceeding the limit %d\n",
+ throttle_state, ATH10K_THERMAL_THROTTLE_MAX);
+ return -EINVAL;
}
- ar->thermal.duty_cycle = duty_cycle;
-out:
+ mutex_lock(&ar->conf_mutex);
+ ar->thermal.throttle_state = throttle_state;
+ ath10k_thermal_set_throttling(ar);
mutex_unlock(&ar->conf_mutex);
- return ret;
+ return 0;
}
static struct thermal_cooling_device_ops ath10k_thermal_ops = {
- .get_max_state = ath10k_thermal_get_max_dutycycle,
- .get_cur_state = ath10k_thermal_get_cur_dutycycle,
- .set_cur_state = ath10k_thermal_set_cur_dutycycle,
+ .get_max_state = ath10k_thermal_get_max_throttle_state,
+ .get_cur_state = ath10k_thermal_get_cur_throttle_state,
+ .set_cur_state = ath10k_thermal_set_cur_throttle_state,
};
static ssize_t ath10k_thermal_show_temp(struct device *dev,
@@ -127,6 +75,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
{
struct ath10k *ar = dev_get_drvdata(dev);
int ret, temperature;
+ unsigned long time_left;
mutex_lock(&ar->conf_mutex);
@@ -148,9 +97,9 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
goto out;
}
- ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
- ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
- if (ret == 0) {
+ time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
+ if (!time_left) {
ath10k_warn(ar, "failed to synchronize thermal read\n");
ret = -ETIMEDOUT;
goto out;
@@ -184,6 +133,32 @@ static struct attribute *ath10k_hwmon_attrs[] = {
};
ATTRIBUTE_GROUPS(ath10k_hwmon);
+void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+ u32 period, duration, enabled;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+ return;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return;
+
+ period = ar->thermal.quiet_period;
+ duration = (period * ar->thermal.throttle_state) / 100;
+ enabled = duration ? 1 : 0;
+
+ ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
+ ATH10K_QUIET_START_OFFSET,
+ enabled);
+ if (ret) {
+ ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
+ period, duration, enabled, ret);
+ }
+}
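
The quiet-mode parameters are straight percentage arithmetic: duration is throttle_state percent of the period, and quiet mode is enabled only when that yields a non-zero duration. A standalone sketch with assumed sample values:

#include <stdio.h>

int main(void)
{
	unsigned int period = 100;		/* ATH10K_QUIET_PERIOD_DEFAULT */
	unsigned int throttle_state = 40;	/* percent, <= ATH10K_THERMAL_THROTTLE_MAX */
	unsigned int duration = (period * throttle_state) / 100;
	unsigned int enabled = duration ? 1 : 0;

	printf("period %u ms, quiet %u ms, enabled %u\n",
	       period, duration, enabled);
	return 0;
}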
+
int ath10k_thermal_register(struct ath10k *ar)
{
struct thermal_cooling_device *cdev;
@@ -202,11 +177,12 @@ int ath10k_thermal_register(struct ath10k *ar)
ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
"cooling_device");
if (ret) {
- ath10k_err(ar, "failed to create thermal symlink\n");
+ ath10k_err(ar, "failed to create cooling device symlink\n");
goto err_cooling_destroy;
}
ar->thermal.cdev = cdev;
+ ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
/* Do not register hwmon device when temperature reading is not
* supported by firmware
@@ -231,7 +207,7 @@ int ath10k_thermal_register(struct ath10k *ar)
return 0;
err_remove_link:
- sysfs_remove_link(&ar->dev->kobj, "thermal_sensor");
+ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
err_cooling_destroy:
thermal_cooling_device_unregister(cdev);
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index bccc17ae0fde..b610ea5caae8 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -19,16 +19,17 @@
#define ATH10K_QUIET_PERIOD_DEFAULT 100
#define ATH10K_QUIET_PERIOD_MIN 25
#define ATH10K_QUIET_START_OFFSET 10
-#define ATH10K_QUIET_DUTY_CYCLE_MAX 70
#define ATH10K_HWMON_NAME_LEN 15
#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+#define ATH10K_THERMAL_THROTTLE_MAX 100
struct ath10k_thermal {
struct thermal_cooling_device *cdev;
struct completion wmi_sync;
/* protected by conf_mutex */
- u32 duty_cycle;
+ u32 throttle_state;
+ u32 quiet_period;
 /* temperature value in degrees Celsius
* protected by data_lock
*/
@@ -39,6 +40,7 @@ struct ath10k_thermal {
int ath10k_thermal_register(struct ath10k *ar);
void ath10k_thermal_unregister(struct ath10k *ar);
void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
+void ath10k_thermal_set_throttling(struct ath10k *ar);
#else
static inline int ath10k_thermal_register(struct ath10k *ar)
{
@@ -54,5 +56,9 @@ static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
{
}
+static inline void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+}
+
#endif
#endif /* _THERMAL_ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 5407887380ab..71bdb368813d 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -21,11 +21,16 @@
#include "core.h"
#if !defined(_TRACE_H_)
-static inline u32 ath10k_frm_hdr_len(const void *buf)
+static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
{
const struct ieee80211_hdr *hdr = buf;
- return ieee80211_hdrlen(hdr->frame_control);
+ /* In some rare cases (e.g. an fcs error) the device reports a frame
+ * buffer shorter than what the frame header implies (e.g. len = 0).
+ * The buffer can still be accessed, so do a simple min() to guarantee
+ * the caller doesn't get a value greater than len.
+ */
+ return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control));
}
#endif
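
The min_t() clamp above is the whole trick: whichever is smaller of the reported buffer length and the header-implied length bounds the copy. A standalone sketch of the same clamp with assumed sample lengths:

#include <stdint.h>
#include <stdio.h>

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t buf_len = 0;	/* e.g. an fcs-error frame reported with len 0 */
	uint32_t hdr_len = 24;	/* what the 802.11 frame_control would imply */

	/* clamping guarantees the trace code never copies past the buffer */
	printf("clamped header length: %u\n", min_u32(buf_len, hdr_len));
	return 0;
}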
@@ -46,7 +51,7 @@ static inline void trace_ ## name(proto) {}
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath10k
-#define ATH10K_MSG_MAX 200
+#define ATH10K_MSG_MAX 400
DECLARE_EVENT_CLASS(ath10k_log_event,
TP_PROTO(struct ath10k *ar, struct va_format *vaf),
@@ -360,13 +365,13 @@ DECLARE_EVENT_CLASS(ath10k_hdr_event,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, len)
- __dynamic_array(u8, data, ath10k_frm_hdr_len(data))
+ __dynamic_array(u8, data, ath10k_frm_hdr_len(data, len))
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
- __entry->len = ath10k_frm_hdr_len(data);
+ __entry->len = ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(data), data, __entry->len);
),
@@ -387,15 +392,16 @@ DECLARE_EVENT_CLASS(ath10k_payload_event,
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, len)
- __dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
+ __dynamic_array(u8, payload, (len -
+ ath10k_frm_hdr_len(data, len)))
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
- __entry->len = len - ath10k_frm_hdr_len(data);
+ __entry->len = len - ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(payload),
- data + ath10k_frm_hdr_len(data), __entry->len);
+ data + ath10k_frm_hdr_len(data, len), __entry->len);
),
TP_printk(
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 3f00cec8aef5..826500bb2b1b 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -55,8 +55,10 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
lockdep_assert_held(&htt->tx_lock);
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
- tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
+ tx_done->msdu_id, !!tx_done->discard,
+ !!tx_done->no_ack, !!tx_done->success);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
@@ -97,6 +99,9 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
if (tx_done->no_ack)
info->flags &= ~IEEE80211_TX_STAT_ACK;
+ if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index c8b64e7a6089..47fe2e756bec 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -45,6 +45,10 @@ struct wmi_ops {
struct wmi_rdy_ev_arg *arg);
int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
struct ath10k_fw_stats *stats);
+ int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg);
+ int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@@ -81,7 +85,8 @@ struct wmi_ops {
struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN]);
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type);
struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]);
struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
@@ -148,6 +153,27 @@ struct wmi_ops {
u32 num_ac);
struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg);
+ struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
+ struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+ struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
+ struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id,
+ const u8 *pattern,
+ const u8 *mask,
+ int pattern_len,
+ int pattern_offset);
+ struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id);
+ struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_tdls_state state);
+ struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan);
+ struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -274,6 +300,26 @@ ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
}
static inline int
+ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_roam_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_wow_event)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_wow_event(ar, skb, arg);
+}
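
These inline wrappers follow the driver's uniform op-table dispatch pattern: probe the function pointer, return -EOPNOTSUPP when a WMI variant lacks the hook, otherwise delegate. A self-contained sketch of that pattern with invented names:

#include <errno.h>
#include <stdio.h>

struct dev;

struct dev_ops {
	int (*pull_event)(struct dev *d);	/* may be NULL if unsupported */
};

struct dev {
	const struct dev_ops *ops;
};

static int dev_pull_event(struct dev *d)
{
	if (!d->ops->pull_event)
		return -EOPNOTSUPP;	/* this variant simply lacks the hook */

	return d->ops->pull_event(d);
}

int main(void)
{
	const struct dev_ops no_ops = { 0 };
	struct dev d = { .ops = &no_ops };

	printf("ret = %d\n", dev_pull_event(&d));	/* -EOPNOTSUPP */
	return 0;
}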
+
+static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
@@ -624,14 +670,15 @@ ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN])
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_peer_create)
return -EOPNOTSUPP;
- skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
+ skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1060,4 +1107,145 @@ ath10k_wmi_sta_keepalive(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
+static inline int
+ath10k_wmi_wow_enable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_enable(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_enable_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_add_wakeup_event)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_add_pattern)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
+ pattern, mask, pattern_len,
+ pattern_offset);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_del_pattern)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_update_fw_tdls_state)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
+}
+
+static inline int
+ath10k_wmi_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_tdls_peer_update)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->tdls_peer_update_cmdid);
+}
+
+static inline int
+ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_adaptive_qcs)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
+}
+
#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index ee0c5f602e29..563fde73623c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -16,10 +16,12 @@
*/
#include "core.h"
#include "debug.h"
+#include "mac.h"
#include "hw.h"
+#include "mac.h"
#include "wmi.h"
#include "wmi-ops.h"
#include "wmi-tlv.h"
+#include "p2p.h"
/***************/
/* TLV helpers */
@@ -31,9 +34,9 @@ struct wmi_tlv_policy {
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
[WMI_TLV_TAG_ARRAY_BYTE]
- = { .min_len = sizeof(u8) },
+ = { .min_len = 0 },
[WMI_TLV_TAG_ARRAY_UINT32]
- = { .min_len = sizeof(u32) },
+ = { .min_len = 0 },
[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
= { .min_len = sizeof(struct wmi_scan_event) },
[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
@@ -62,6 +65,14 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+ [WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
+ [WMI_TLV_TAG_STRUCT_ROAM_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_roam_ev) },
+ [WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
+ = { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
+ [WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
};
static int
@@ -168,6 +179,7 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
{
const void **tb;
const struct wmi_tlv_bcn_tx_status_ev *ev;
+ struct ath10k_vif *arvif;
u32 vdev_id, tx_status;
int ret;
@@ -201,6 +213,10 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
break;
}
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (arvif && arvif->is_up && arvif->vif->csa_active)
+ ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
+
kfree(tb);
return 0;
}
@@ -296,6 +312,83 @@ static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
return 0;
}
+static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_p2p_noa_ev *ev;
+ const struct wmi_p2p_noa_info *noa;
+ int ret, vdev_id;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
+ noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
+
+ if (!ev || !noa) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
+ vdev_id, noa->num_descriptors);
+
+ ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_tx_pause_ev *ev;
+ int ret, vdev_id;
+ u32 pause_id, action, vdev_map, peer_id, tid_map;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ pause_id = __le32_to_cpu(ev->pause_id);
+ action = __le32_to_cpu(ev->action);
+ vdev_map = __le32_to_cpu(ev->vdev_map);
+ peer_id = __le32_to_cpu(ev->peer_id);
+ tid_map = __le32_to_cpu(ev->tid_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
+ pause_id, action, vdev_map, peer_id, tid_map);
+
+ for (vdev_id = 0; vdev_map; vdev_id++) {
+ if (!(vdev_map & BIT(vdev_id)))
+ continue;
+
+ vdev_map &= ~BIT(vdev_id);
+ ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+ }
+
+ kfree(tb);
+ return 0;
+}
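
The vdev_map walk clears each bit as it is serviced, so the loop terminates as soon as the map drains rather than scanning all 32 positions. Standalone sketch with an assumed sample map:

#include <stdio.h>

int main(void)
{
	unsigned int vdev_map = 0x15;	/* assumed sample: vdevs 0, 2 and 4 */
	int vdev_id;

	for (vdev_id = 0; vdev_map; vdev_id++) {
		if (!(vdev_map & (1u << vdev_id)))
			continue;

		vdev_map &= ~(1u << vdev_id);	/* loop ends when the map drains */
		printf("pause/unpause vdev %d\n", vdev_id);
	}
	return 0;
}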
+
/***********/
/* TLV ops */
/***********/
@@ -417,6 +510,12 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_TLV_DIAG_EVENTID:
ath10k_wmi_tlv_event_diag(ar, skb);
break;
+ case WMI_TLV_P2P_NOA_EVENTID:
+ ath10k_wmi_tlv_event_p2p_noa(ar, skb);
+ break;
+ case WMI_TLV_TX_PAUSE_EVENTID:
+ ath10k_wmi_tlv_event_tx_pause(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -1012,6 +1111,65 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
return 0;
}
+static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_roam_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = ev->vdev_id;
+ arg->reason = ev->reason;
+ arg->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_wow_event_info *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = __le32_to_cpu(ev->vdev_id);
+ arg->flag = __le32_to_cpu(ev->flag);
+ arg->wake_reason = __le32_to_cpu(ev->wake_reason);
+ arg->data_len = __le32_to_cpu(ev->data_len);
+
+ kfree(tb);
+ return 0;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
{
@@ -1160,8 +1318,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
- cfg->num_offload_peers = __cpu_to_le32(3);
- cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
+ cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
} else {
cfg->num_offload_peers = __cpu_to_le32(0);
cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
@@ -1178,8 +1336,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
cfg->rx_decap_mode = __cpu_to_le32(1);
cfg->scan_max_pending_reqs = __cpu_to_le32(4);
- cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
- cfg->roam_offload_max_vdev = __cpu_to_le32(3);
+ cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
cfg->num_mcast_groups = __cpu_to_le32(0);
cfg->num_mcast_table_elems = __cpu_to_le32(0);
@@ -1193,11 +1351,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
cfg->max_frag_entries = __cpu_to_le32(2);
- cfg->num_tdls_vdevs = __cpu_to_le32(1);
+ cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
cfg->num_multicast_filter_entries = __cpu_to_le32(5);
- cfg->num_wow_filters = __cpu_to_le32(0x16);
+ cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
cfg->num_keep_alive_pattern = __cpu_to_le32(6);
cfg->keep_alive_pattern_size = __cpu_to_le32(0);
cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
@@ -1248,7 +1406,7 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
cmd = (void *)tlv->value;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- cmd->burst_duration_ms = __cpu_to_le32(0);
+ cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
cmd->num_channels = __cpu_to_le32(arg->n_channels);
cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
@@ -1408,8 +1566,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
void *ptr;
u32 flags = 0;
- if (WARN_ON(arg->ssid && arg->ssid_len == 0))
- return ERR_PTR(-EINVAL);
if (WARN_ON(arg->hidden_ssid && !arg->ssid))
return ERR_PTR(-EINVAL);
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -1782,7 +1938,8 @@ ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN])
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
{
struct wmi_tlv_peer_create_cmd *cmd;
struct wmi_tlv *tlv;
@@ -1797,7 +1954,7 @@ ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
- cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
+ cmd->peer_type = __cpu_to_le32(peer_type);
ether_addr_copy(cmd->peer_addr.addr, peer_addr);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
@@ -2027,7 +2184,7 @@ ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
if (!mac)
return ERR_PTR(-EINVAL);
- skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -2485,6 +2642,387 @@ ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
return skb;
}
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct wmi_tdls_set_state_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+ /* Set to a bitmask of options from wmi_tlv_tdls_options;
+ * for now none of them are enabled.
+ */
+ u32 options = 0;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->state = __cpu_to_le32(state);
+ cmd->notification_interval_ms = __cpu_to_le32(5000);
+ cmd->tx_discovery_threshold = __cpu_to_le32(100);
+ cmd->tx_teardown_threshold = __cpu_to_le32(5);
+ cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+ cmd->rssi_delta = __cpu_to_le32(-20);
+ cmd->tdls_options = __cpu_to_le32(options);
+ cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+ cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+ cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+ cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+ cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
+ state, vdev_id);
+ return skb;
+}
+
+static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+ u32 peer_qos = 0;
+
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
+
+ peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
+
+ return peer_qos;
+}
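
The peer QoS word is built by translating per-AC U-APSD bits one-to-one and then shifting the service period into its field, as SM() does in the driver. A self-contained sketch; the bit layout constants mirror the WMI_TLV_TDLS_PEER_* definitions but the sample inputs are assumptions:

#include <stdint.h>
#include <stdio.h>

#define QOS_AC_VO (1u << 0)
#define QOS_AC_VI (1u << 1)
#define QOS_AC_BK (1u << 2)
#define QOS_AC_BE (1u << 3)
#define QOS_SP_LSB 5		/* matches WMI_TLV_TDLS_PEER_SP_LSB */

int main(void)
{
	uint8_t uapsd_queues = 0x9;	/* assumed sample: VO + BE enabled */
	uint8_t sp = 2;			/* assumed max service period */
	uint32_t peer_qos = 0;

	if (uapsd_queues & 0x1) peer_qos |= QOS_AC_VO;
	if (uapsd_queues & 0x2) peer_qos |= QOS_AC_VI;
	if (uapsd_queues & 0x4) peer_qos |= QOS_AC_BK;
	if (uapsd_queues & 0x8) peer_qos |= QOS_AC_BE;
	peer_qos |= (uint32_t)sp << QOS_SP_LSB;	/* SM(sp, ..._PEER_SP) equivalent */

	printf("peer_qos = 0x%02x\n", peer_qos);
	return 0;
}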
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan_arg)
+{
+ struct wmi_tdls_peer_update_cmd *cmd;
+ struct wmi_tdls_peer_capab *peer_cap;
+ struct wmi_channel *chan;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 peer_qos;
+ void *ptr;
+ int len;
+ int i;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*peer_cap) +
+ sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+ cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
+ tlv->len = __cpu_to_le16(sizeof(*peer_cap));
+ peer_cap = (void *)tlv->value;
+ peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
+ cap->peer_max_sp);
+ peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+ peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+ peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+ peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+ peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+ peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+ peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+ for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+ peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+ peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+ peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+ peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*peer_cap);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
+
+ ptr += sizeof(*tlv);
+
+ for (i = 0; i < cap->peer_chan_len; i++) {
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*chan));
+ chan = (void *)tlv->value;
+ ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*chan);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
+ arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
+{
+ struct wmi_tlv_wow_enable_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->enable = __cpu_to_le32(1);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_tlv_wow_add_del_event_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->is_add = __cpu_to_le32(enable);
+ cmd->event_bitmap = __cpu_to_le32(1 << event);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
+{
+ struct wmi_tlv_wow_host_wakeup_ind *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id, const u8 *pattern,
+ const u8 *bitmask, int pattern_len,
+ int pattern_offset)
+{
+ struct wmi_tlv_wow_add_pattern_cmd *cmd;
+ struct wmi_tlv_wow_bitmap_pattern *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*tlv) + sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ /* cmd */
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->pattern_id = __cpu_to_le32(pattern_id);
+ cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
+ tlv->len = __cpu_to_le16(sizeof(*bitmap));
+ bitmap = (void *)tlv->value;
+
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
+ bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
+ bitmap->pattern_len = __cpu_to_le32(pattern_len);
+ bitmap->bitmask_len = __cpu_to_le32(pattern_len);
+ bitmap->pattern_id = __cpu_to_le32(pattern_id);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(sizeof(u32));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
+ vdev_id, pattern_id, pattern_offset);
+ return skb;
+}
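
The command body above is a sequential TLV walk: each section writes a tag/len header, its value bytes, then advances the cursor, with zero-length TLVs marking unused sections. A standalone sketch of that encoding discipline; tags and payload are invented, and real WMI code would additionally convert fields to little-endian with __cpu_to_le16/32:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint16_t tag;
	uint16_t len;
	uint8_t value[];
};

int main(void)
{
	uint8_t buf[64];
	uint8_t *ptr = buf;
	struct tlv *t;
	uint32_t payload = 0x1234;

	/* first TLV carries a 4-byte value */
	t = (struct tlv *)ptr;
	t->tag = 1;			/* assumed tag id */
	t->len = sizeof(payload);
	memcpy(t->value, &payload, sizeof(payload));
	ptr += sizeof(*t) + sizeof(payload);

	/* empty placeholder TLV, like the unused ipv4/ipv6/magic sections */
	t = (struct tlv *)ptr;
	t->tag = 2;			/* assumed tag id */
	t->len = 0;
	ptr += sizeof(*t);

	printf("encoded %zu bytes\n", (size_t)(ptr - buf));
	return 0;
}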
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id)
+{
+ struct wmi_tlv_wow_del_pattern_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->pattern_id = __cpu_to_le32(pattern_id);
+ cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+ struct wmi_tlv_adaptive_qcs *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->enable = __cpu_to_le32(enable ? 1 : 0);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
+ return skb;
+}
+
/****************/
/* TLV mappings */
/****************/
@@ -2609,6 +3147,9 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+ .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
+ .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+ .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
};
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -2736,6 +3277,8 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+ .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
+ .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
@@ -2781,6 +3324,14 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+ .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
+ .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
+ .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
+ .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
+ .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+ .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
+ .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
+ .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
};
/************/
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index a6c8280cc4b1..ad655c44afdb 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1454,6 +1454,174 @@ struct wmi_tlv_stats_ev {
__le32 num_chan_stats;
} __packed;
+struct wmi_tlv_p2p_noa_ev {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_tlv_roam_ev {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+} __packed;
+
+struct wmi_tlv_wow_add_del_event_cmd {
+ __le32 vdev_id;
+ __le32 is_add;
+ __le32 event_bitmap;
+} __packed;
+
+struct wmi_tlv_wow_enable_cmd {
+ __le32 enable;
+} __packed;
+
+struct wmi_tlv_wow_host_wakeup_ind {
+ __le32 reserved;
+} __packed;
+
+struct wmi_tlv_wow_event_info {
+ __le32 vdev_id;
+ __le32 flag;
+ __le32 wake_reason;
+ __le32 data_len;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+struct wmi_tlv_wow_bitmap_pattern {
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ __le32 pattern_offset;
+ __le32 pattern_len;
+ __le32 bitmask_len;
+ __le32 pattern_id;
+} __packed;
+
+struct wmi_tlv_wow_add_pattern_cmd {
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+struct wmi_tlv_wow_del_pattern_cmd {
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+/* TDLS Options */
+enum wmi_tlv_tdls_options {
+ WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
+ WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
+ WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
+};
+
+struct wmi_tdls_set_state_cmd {
+ __le32 vdev_id;
+ __le32 state;
+ __le32 notification_interval_ms;
+ __le32 tx_discovery_threshold;
+ __le32 tx_teardown_threshold;
+ __le32 rssi_teardown_threshold;
+ __le32 rssi_delta;
+ __le32 tdls_options;
+ __le32 tdls_peer_traffic_ind_window;
+ __le32 tdls_peer_traffic_response_timeout_ms;
+ __le32 tdls_puapsd_mask;
+ __le32 tdls_puapsd_inactivity_time_ms;
+ __le32 tdls_puapsd_rx_frame_threshold;
+} __packed;
+
+struct wmi_tdls_peer_update_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_state;
+} __packed;
+
+enum {
+ WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
+ WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
+ WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
+ WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
+};
+
+#define WMI_TLV_TDLS_PEER_SP_MASK 0x60
+#define WMI_TLV_TDLS_PEER_SP_LSB 5
+
+struct wmi_tdls_peer_capab {
+ __le32 peer_qos;
+ __le32 buff_sta_support;
+ __le32 off_chan_support;
+ __le32 peer_curr_operclass;
+ __le32 self_curr_operclass;
+ __le32 peer_chan_len;
+ __le32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ __le32 is_peer_responder;
+ __le32 pref_offchan_num;
+ __le32 pref_offchan_bw;
+} __packed;
+
+struct wmi_tlv_adaptive_qcs {
+ __le32 enable;
+} __packed;
+
+/**
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ *
+ * @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
+ * Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
+ * Only peer_id is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
+ * vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
+ * vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_HOST: Host itself requested tx pause.
+ */
+enum wmi_tlv_tx_pause_id {
+ WMI_TLV_TX_PAUSE_ID_MCC = 1,
+ WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2,
+ WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3,
+ WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4,
+ WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5,
+ WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6,
+ WMI_TLV_TX_PAUSE_ID_AP_PS = 7,
+ WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8,
+ WMI_TLV_TX_PAUSE_ID_HOST = 21,
+};
+
+enum wmi_tlv_tx_pause_action {
+ WMI_TLV_TX_PAUSE_ACTION_STOP,
+ WMI_TLV_TX_PAUSE_ACTION_WAKE,
+};
+
+struct wmi_tlv_tx_pause_ev {
+ __le32 pause_id;
+ __le32 action;
+ __le32 vdev_map;
+ __le32 peer_id;
+ __le32 tid_map;
+} __packed;
+
void ath10k_wmi_tlv_attach(struct ath10k *ar);
#endif
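How the wmi_tlv_tx_pause_ev fields pair up with the pause ids documented above (an illustrative reading; the _map semantics are inferred from the naming and the kernel-doc, not spelled out by the patch):

/* e.g. pause_id == WMI_TLV_TX_PAUSE_ID_MCC with vdev_map == 0x5 would
 * pause vdevs 0 and 2 (one bit per vdev id), while the peer-scoped ids
 * such as WMI_TLV_TX_PAUSE_ID_AP_PEER_PS use peer_id and tid_map instead.
 */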
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index c7ea77edce24..0fabe689179c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -26,6 +26,7 @@
#include "mac.h"
#include "testmode.h"
#include "wmi-ops.h"
+#include "p2p.h"
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
@@ -884,20 +885,24 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
- int ret;
+ unsigned long time_left;
- ret = wait_for_completion_timeout(&ar->wmi.service_ready,
- WMI_SERVICE_READY_TIMEOUT_HZ);
- return ret;
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+ return 0;
}
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
- int ret;
+ unsigned long time_left;
- ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
- WMI_UNIFIED_READY_TIMEOUT_HZ);
- return ret;
+ time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
+ WMI_UNIFIED_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+ return 0;
}
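The rewrite above matters because wait_for_completion_timeout() returns the remaining jiffies on success and 0 on timeout, not an errno; a hedged caller sketch of the normalized contract (ret assumed declared as int):

	ret = ath10k_wmi_wait_for_service_ready(ar);
	if (ret) {
		/* now always a plain errno (-ETIMEDOUT), never a jiffies count */
		ath10k_warn(ar, "wmi service ready not received: %d\n", ret);
		return ret;
	}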
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
@@ -1351,63 +1356,6 @@ static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
return band;
}
-static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
-{
- u8 rate_idx = 0;
-
- /* rate in Kbps */
- switch (rate) {
- case 1000:
- rate_idx = 0;
- break;
- case 2000:
- rate_idx = 1;
- break;
- case 5500:
- rate_idx = 2;
- break;
- case 11000:
- rate_idx = 3;
- break;
- case 6000:
- rate_idx = 4;
- break;
- case 9000:
- rate_idx = 5;
- break;
- case 12000:
- rate_idx = 6;
- break;
- case 18000:
- rate_idx = 7;
- break;
- case 24000:
- rate_idx = 8;
- break;
- case 36000:
- rate_idx = 9;
- break;
- case 48000:
- rate_idx = 10;
- break;
- case 54000:
- rate_idx = 11;
- break;
- default:
- break;
- }
-
- if (band == IEEE80211_BAND_5GHZ) {
- if (rate_idx > 3)
- /* Omit CCK rates */
- rate_idx -= 4;
- else
- rate_idx = 0;
- }
-
- return rate_idx;
-}
-
/* If keys are configured, HW decrypts all frames
* with protected bit set. Mark such frames as decrypted.
*/
@@ -1489,6 +1437,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
struct wmi_mgmt_rx_ev_arg arg = {};
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
+ struct ieee80211_supported_band *sband;
u32 rx_status;
u32 channel;
u32 phy_mode;
@@ -1559,9 +1508,11 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+ sband = &ar->mac.sbands[status->band];
+
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
- status->rate_idx = get_rate_idx(rate, status->band);
+ status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
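The replacement works because the firmware reports the rate in kbps while mac80211's per-band bitrate tables are in units of 100 kbps; a worked example (values illustrative):

/* firmware: 54000 kbps -> 54000 / 100 == 540, which is exactly
 * ieee80211_rate.bitrate for 54 Mbit/s, so ath10k_mac_bitrate_to_idx()
 * can look the rate up in sband instead of the hardcoded table above. */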
@@ -1585,6 +1536,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
}
}
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath10k_mac_handle_beacon(ar, skb);
+
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
@@ -1691,10 +1645,10 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
survey = &ar->survey[idx];
survey->time = WMI_CHAN_INFO_MSEC(cycle_count);
- survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
+ survey->time_busy = WMI_CHAN_INFO_MSEC(rx_clear_count);
survey->noise = noise_floor;
survey->filled = SURVEY_INFO_TIME |
- SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BUSY |
SURVEY_INFO_NOISE_DBM;
}
@@ -2276,109 +2230,25 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
tim->bitmap_ctrl, pvm_len);
}
-static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
- const struct wmi_p2p_noa_info *noa)
-{
- struct ieee80211_p2p_noa_attr *noa_attr;
- u8 ctwindow_oppps = noa->ctwindow_oppps;
- u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
- bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
- __le16 *noa_attr_len;
- u16 attr_len;
- u8 noa_descriptors = noa->num_descriptors;
- int i;
-
- /* P2P IE */
- data[0] = WLAN_EID_VENDOR_SPECIFIC;
- data[1] = len - 2;
- data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
- data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
- data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
- data[5] = WLAN_OUI_TYPE_WFA_P2P;
-
- /* NOA ATTR */
- data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
- noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
- noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
-
- noa_attr->index = noa->index;
- noa_attr->oppps_ctwindow = ctwindow;
- if (oppps)
- noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
-
- for (i = 0; i < noa_descriptors; i++) {
- noa_attr->desc[i].count =
- __le32_to_cpu(noa->descriptors[i].type_count);
- noa_attr->desc[i].duration = noa->descriptors[i].duration;
- noa_attr->desc[i].interval = noa->descriptors[i].interval;
- noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
- }
-
- attr_len = 2; /* index + oppps_ctwindow */
- attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
- *noa_attr_len = __cpu_to_le16(attr_len);
-}
-
-static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
-{
- u32 len = 0;
- u8 noa_descriptors = noa->num_descriptors;
- u8 opp_ps_info = noa->ctwindow_oppps;
- bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
-
- if (!noa_descriptors && !opps_enabled)
- return len;
-
- len += 1 + 1 + 4; /* EID + len + OUI */
- len += 1 + 2; /* noa attr + attr len */
- len += 1 + 1; /* index + oppps_ctwindow */
- len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
-
- return len;
-}
-
static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
struct sk_buff *bcn,
const struct wmi_p2p_noa_info *noa)
{
- u8 *new_data, *old_data = arvif->u.ap.noa_data;
- u32 new_len;
-
if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
return;
ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
- if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
- new_len = ath10k_p2p_calc_noa_ie_len(noa);
- if (!new_len)
- goto cleanup;
- new_data = kmalloc(new_len, GFP_ATOMIC);
- if (!new_data)
- goto cleanup;
-
- ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
-
- spin_lock_bh(&ar->data_lock);
- arvif->u.ap.noa_data = new_data;
- arvif->u.ap.noa_len = new_len;
- spin_unlock_bh(&ar->data_lock);
- kfree(old_data);
- }
+ if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
+ ath10k_p2p_noa_update(arvif, noa);
if (arvif->u.ap.noa_data)
if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
memcpy(skb_put(bcn, arvif->u.ap.noa_len),
arvif->u.ap.noa_data,
arvif->u.ap.noa_len);
- return;
-cleanup:
- spin_lock_bh(&ar->data_lock);
- arvif->u.ap.noa_data = NULL;
- arvif->u.ap.noa_len = 0;
- spin_unlock_bh(&ar->data_lock);
- kfree(old_data);
+ return;
}
static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
@@ -2555,6 +2425,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
u64 tsf)
{
u32 reg0, reg1, tsf32l;
+ struct ieee80211_channel *ch;
struct pulse_event pe;
u64 tsf64;
u8 rssi, width;
@@ -2583,6 +2454,15 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
if (!ar->dfs_detector)
return;
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->rx_channel;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch) {
+ ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
+ goto radar_detected;
+ }
+
/* report event to DFS pattern detector */
tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
tsf64 = tsf & (~0xFFFFFFFFULL);
@@ -2598,10 +2478,10 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
rssi = 0;
pe.ts = tsf64;
- pe.freq = ar->hw->conf.chandef.chan->center_freq;
+ pe.freq = ch->center_freq;
pe.width = width;
pe.rssi = rssi;
-
+ pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
pe.freq, pe.width, pe.rssi, pe.ts);
@@ -2614,6 +2494,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
return;
}
+radar_detected:
ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
ATH10K_DFS_STAT_INC(ar, radar_detected);
@@ -2872,7 +2753,43 @@ void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+ struct wmi_roam_ev_arg arg = {};
+ int ret;
+ u32 vdev_id;
+ u32 reason;
+ s32 rssi;
+
+ ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
+ return;
+ }
+
+ vdev_id = __le32_to_cpu(arg.vdev_id);
+ reason = __le32_to_cpu(arg.reason);
+ rssi = __le32_to_cpu(arg.rssi);
+ rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+ vdev_id, reason, rssi);
+
+ if (reason >= WMI_ROAM_REASON_MAX)
+ ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
+ reason, vdev_id);
+
+ switch (reason) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ ath10k_mac_handle_beacon_miss(ar, vdev_id);
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
+ reason, vdev_id);
+ break;
+ }
}
void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
@@ -2942,7 +2859,19 @@ void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
+ struct wmi_wow_ev_arg ev = {};
+ int ret;
+
+ complete(&ar->wow.wakeup_completed);
+
+ ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
+ wow_reason(ev.wake_reason));
}
void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
@@ -3231,6 +3160,21 @@ static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ struct wmi_roam_ev *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->reason = ev->reason;
+
+ return 0;
+}
+
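A note on the missing rssi assignment above (contrast with the TLV variant added earlier in this patch):

/* The main-firmware struct wmi_roam_ev carries only vdev_id and reason,
 * so arg->rssi keeps the zero from the caller's "= {}" initializer and
 * the rssi logged by ath10k_wmi_event_roam degenerates to the
 * noise-floor reference constant. */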
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_rdy_ev_arg arg = {};
@@ -3989,6 +3933,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd_10_2 *)buf->data;
features = WMI_10_2_RX_BATCH_MODE;
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+ features |= WMI_10_2_COEX_GPIO;
cmd->resource_config.feature_mask = __cpu_to_le32(features);
memcpy(&cmd->resource_config.common, &config, sizeof(config));
@@ -4315,8 +4261,6 @@ ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
const char *cmdname;
u32 flags = 0;
- if (WARN_ON(arg->ssid && arg->ssid_len == 0))
- return ERR_PTR(-EINVAL);
if (WARN_ON(arg->hidden_ssid && !arg->ssid))
return ERR_PTR(-EINVAL);
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -4539,7 +4483,8 @@ ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
static struct sk_buff *
ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
- const u8 peer_addr[ETH_ALEN])
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
{
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
@@ -5223,6 +5168,7 @@ static const struct wmi_ops wmi_ops = {
.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5268,6 +5214,7 @@ static const struct wmi_ops wmi_ops = {
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
};
static const struct wmi_ops wmi_10_1_ops = {
@@ -5290,6 +5237,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5330,6 +5278,7 @@ static const struct wmi_ops wmi_10_1_ops = {
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
};
static const struct wmi_ops wmi_10_2_ops = {
@@ -5353,6 +5302,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5413,6 +5363,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_swba = ath10k_wmi_op_pull_swba_ev,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5452,6 +5403,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
};
int ath10k_wmi_attach(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index adf935bf0580..cad72ae76253 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -148,6 +148,8 @@ enum wmi_service {
WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
WMI_SERVICE_MDNS_OFFLOAD,
WMI_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_SERVICE_ATF,
+ WMI_SERVICE_COEX_GPIO,
/* keep last */
WMI_SERVICE_MAX,
@@ -177,6 +179,8 @@ enum wmi_10x_service {
WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
WMI_10X_SERVICE_FORCE_FW_HANG,
WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_10X_SERVICE_ATF,
+ WMI_10X_SERVICE_COEX_GPIO,
};
enum wmi_main_service {
@@ -293,6 +297,8 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
+ SVCSTR(WMI_SERVICE_ATF);
+ SVCSTR(WMI_SERVICE_COEX_GPIO);
default:
return NULL;
}
@@ -356,6 +362,10 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_FORCE_FW_HANG, len);
SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_ATF,
+ WMI_SERVICE_ATF, len);
+ SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
+ WMI_SERVICE_COEX_GPIO, len);
}
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -552,6 +562,9 @@ struct wmi_cmd_map {
u32 gpio_output_cmdid;
u32 pdev_get_temperature_cmdid;
u32 vdev_set_wmm_params_cmdid;
+ u32 tdls_set_state_cmdid;
+ u32 tdls_peer_update_cmdid;
+ u32 adaptive_qcs_cmdid;
};
/*
@@ -1952,6 +1965,7 @@ struct wmi_resource_config_10x {
enum wmi_10_2_feature_mask {
WMI_10_2_RX_BATCH_MODE = BIT(0),
WMI_10_2_ATF_CONFIG = BIT(1),
+ WMI_10_2_COEX_GPIO = BIT(3),
};
struct wmi_resource_config_10_2 {
@@ -2166,6 +2180,7 @@ struct wmi_start_scan_arg {
u32 max_scan_time;
u32 probe_delay;
u32 scan_ctrl_flags;
+ u32 burst_duration_ms;
u32 ie_len;
u32 n_channels;
@@ -4333,6 +4348,12 @@ struct wmi_peer_create_cmd {
struct wmi_mac_addr peer_macaddr;
} __packed;
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
struct wmi_peer_delete_cmd {
__le32 vdev_id;
struct wmi_mac_addr peer_macaddr;
@@ -4644,9 +4665,7 @@ struct wmi_peer_sta_kickout_event {
} __packed;
#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
-
-/* FIXME: empirically extrapolated */
-#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
+#define WMI_CHAN_INFO_MSEC(x) ((x) / 88000)
/* Beacon filter wmi command info */
#define BCN_FLT_MAX_SUPPORTED_IES 256
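The new divisor implies the channel-info counters tick at 88 MHz, i.e. 88,000 cycles per millisecond; a worked example under that assumption:

/* 8,800,000 counter cycles / 88,000 cycles-per-ms = 100 ms of channel
 * time reported to the survey code. */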
@@ -4769,6 +4788,22 @@ struct wmi_dbglog_cfg_cmd {
__le32 config_valid;
} __packed;
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_ev {
+ __le32 vdev_id;
+ __le32 reason;
+} __packed;
+
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
@@ -4857,11 +4892,200 @@ struct wmi_rdy_ev_arg {
const u8 *mac_addr;
};
+struct wmi_roam_ev_arg {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+};
+
struct wmi_pdev_temperature_event {
/* temperature value in degrees Celsius */
__le32 temperature;
} __packed;
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+ WOW_BMISS_EVENT = 0,
+ WOW_BETTER_AP_EVENT,
+ WOW_DEAUTH_RECVD_EVENT,
+ WOW_MAGIC_PKT_RECVD_EVENT,
+ WOW_GTK_ERR_EVENT,
+ WOW_FOURWAY_HSHAKE_EVENT,
+ WOW_EAPOL_RECVD_EVENT,
+ WOW_NLO_DETECTED_EVENT,
+ WOW_DISASSOC_RECVD_EVENT,
+ WOW_PATTERN_MATCH_EVENT,
+ WOW_CSA_IE_EVENT,
+ WOW_PROBE_REQ_WPS_IE_EVENT,
+ WOW_AUTH_REQ_EVENT,
+ WOW_ASSOC_REQ_EVENT,
+ WOW_HTT_EVENT,
+ WOW_RA_MATCH_EVENT,
+ WOW_HOST_AUTO_SHUTDOWN_EVENT,
+ WOW_IOAC_MAGIC_EVENT,
+ WOW_IOAC_SHORT_EVENT,
+ WOW_IOAC_EXTEND_EVENT,
+ WOW_IOAC_TIMER_EVENT,
+ WOW_DFS_PHYERR_RADAR_EVENT,
+ WOW_BEACON_EVENT,
+ WOW_CLIENT_KICKOUT_EVENT,
+ WOW_EVENT_MAX,
+};
+
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+ switch (ev) {
+ C2S(WOW_BMISS_EVENT);
+ C2S(WOW_BETTER_AP_EVENT);
+ C2S(WOW_DEAUTH_RECVD_EVENT);
+ C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+ C2S(WOW_GTK_ERR_EVENT);
+ C2S(WOW_FOURWAY_HSHAKE_EVENT);
+ C2S(WOW_EAPOL_RECVD_EVENT);
+ C2S(WOW_NLO_DETECTED_EVENT);
+ C2S(WOW_DISASSOC_RECVD_EVENT);
+ C2S(WOW_PATTERN_MATCH_EVENT);
+ C2S(WOW_CSA_IE_EVENT);
+ C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+ C2S(WOW_AUTH_REQ_EVENT);
+ C2S(WOW_ASSOC_REQ_EVENT);
+ C2S(WOW_HTT_EVENT);
+ C2S(WOW_RA_MATCH_EVENT);
+ C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+ C2S(WOW_IOAC_MAGIC_EVENT);
+ C2S(WOW_IOAC_SHORT_EVENT);
+ C2S(WOW_IOAC_EXTEND_EVENT);
+ C2S(WOW_IOAC_TIMER_EVENT);
+ C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+ C2S(WOW_BEACON_EVENT);
+ C2S(WOW_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_EVENT_MAX);
+ default:
+ return NULL;
+ }
+}
+
+enum wmi_wow_wake_reason {
+ WOW_REASON_UNSPECIFIED = -1,
+ WOW_REASON_NLOD = 0,
+ WOW_REASON_AP_ASSOC_LOST,
+ WOW_REASON_LOW_RSSI,
+ WOW_REASON_DEAUTH_RECVD,
+ WOW_REASON_DISASSOC_RECVD,
+ WOW_REASON_GTK_HS_ERR,
+ WOW_REASON_EAP_REQ,
+ WOW_REASON_FOURWAY_HS_RECV,
+ WOW_REASON_TIMER_INTR_RECV,
+ WOW_REASON_PATTERN_MATCH_FOUND,
+ WOW_REASON_RECV_MAGIC_PATTERN,
+ WOW_REASON_P2P_DISC,
+ WOW_REASON_WLAN_HB,
+ WOW_REASON_CSA_EVENT,
+ WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+ WOW_REASON_AUTH_REQ_RECV,
+ WOW_REASON_ASSOC_REQ_RECV,
+ WOW_REASON_HTT_EVENT,
+ WOW_REASON_RA_MATCH,
+ WOW_REASON_HOST_AUTO_SHUTDOWN,
+ WOW_REASON_IOAC_MAGIC_EVENT,
+ WOW_REASON_IOAC_SHORT_EVENT,
+ WOW_REASON_IOAC_EXTEND_EVENT,
+ WOW_REASON_IOAC_TIMER_EVENT,
+ WOW_REASON_ROAM_HO,
+ WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+ WOW_REASON_BEACON_RECV,
+ WOW_REASON_CLIENT_KICKOUT_EVENT,
+ WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+ switch (reason) {
+ C2S(WOW_REASON_UNSPECIFIED);
+ C2S(WOW_REASON_NLOD);
+ C2S(WOW_REASON_AP_ASSOC_LOST);
+ C2S(WOW_REASON_LOW_RSSI);
+ C2S(WOW_REASON_DEAUTH_RECVD);
+ C2S(WOW_REASON_DISASSOC_RECVD);
+ C2S(WOW_REASON_GTK_HS_ERR);
+ C2S(WOW_REASON_EAP_REQ);
+ C2S(WOW_REASON_FOURWAY_HS_RECV);
+ C2S(WOW_REASON_TIMER_INTR_RECV);
+ C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+ C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+ C2S(WOW_REASON_P2P_DISC);
+ C2S(WOW_REASON_WLAN_HB);
+ C2S(WOW_REASON_CSA_EVENT);
+ C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+ C2S(WOW_REASON_AUTH_REQ_RECV);
+ C2S(WOW_REASON_ASSOC_REQ_RECV);
+ C2S(WOW_REASON_HTT_EVENT);
+ C2S(WOW_REASON_RA_MATCH);
+ C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+ C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+ C2S(WOW_REASON_IOAC_SHORT_EVENT);
+ C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+ C2S(WOW_REASON_IOAC_TIMER_EVENT);
+ C2S(WOW_REASON_ROAM_HO);
+ C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+ C2S(WOW_REASON_BEACON_RECV);
+ C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_REASON_DEBUG_TEST);
+ default:
+ return NULL;
+ }
+}
+
+#undef C2S
+
+struct wmi_wow_ev_arg {
+ u32 vdev_id;
+ u32 flag;
+ enum wmi_wow_wake_reason wake_reason;
+ u32 data_len;
+};
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+
+enum wmi_tdls_state {
+ WMI_TDLS_DISABLE,
+ WMI_TDLS_ENABLE_PASSIVE,
+ WMI_TDLS_ENABLE_ACTIVE,
+};
+
+enum wmi_tdls_peer_state {
+ WMI_TDLS_PEER_STATE_PEERING,
+ WMI_TDLS_PEER_STATE_CONNECTED,
+ WMI_TDLS_PEER_STATE_TEARDOWN,
+};
+
+struct wmi_tdls_peer_update_cmd_arg {
+ u32 vdev_id;
+ enum wmi_tdls_peer_state peer_state;
+ u8 addr[ETH_ALEN];
+};
+
+#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
+
+struct wmi_tdls_peer_capab_arg {
+ u8 peer_uapsd_queues;
+ u8 peer_max_sp;
+ u32 buff_sta_support;
+ u32 off_chan_support;
+ u32 peer_curr_operclass;
+ u32 self_curr_operclass;
+ u32 peer_chan_len;
+ u32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ u32 is_peer_responder;
+ u32 pref_offchan_num;
+ u32 pref_offchan_bw;
+};
+
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
new file mode 100644
index 000000000000..a68d8fd853a3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+static const struct wiphy_wowlan_support ath10k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_cleanup(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ int ret, i;
+ unsigned long wow_mask = 0;
+ struct ath10k *ar = arvif->ar;
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ int pattern_id = 0;
+
+ /* Set up the requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ /* fall through */
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ int j;
+
+ if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+ continue;
+
+ /* convert bytemask to bitmask */
+ for (j = 0; j < patterns[i].pattern_len; j++)
+ if (patterns[i].mask[j / 8] & BIT(j % 8))
+ bitmask[j] = 0xff;
+
+ ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ patterns[i].pattern,
+ bitmask,
+ patterns[i].pattern_len,
+ patterns[i].pkt_offset);
+ if (ret) {
+ ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
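A worked example of the bytemask-to-bitmask expansion in the pattern loop above (values illustrative):

/* cfg80211 packs one mask bit per pattern byte: with pattern_len = 10
 * and mask[0] = 0x05 (bits 0 and 2 set), pattern bytes 0 and 2 are
 * significant, so bitmask[0] = bitmask[2] = 0xff and every other
 * bitmask byte stays 0x00 - the byte-wide form the firmware expects. */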
+static int ath10k_wow_set_wakeups(struct ath10k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_enable(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->target_suspend);
+
+ ret = ath10k_wmi_wow_enable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "timed out while waiting for suspend completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_wakeup(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->wow.wakeup_completed);
+
+ ret = ath10k_wmi_wow_host_wakeup_ind(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->fw_features))) {
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_wow_cleanup(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath10k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath10k_wow_enable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath10k_hif_suspend(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath10k_wow_wakeup(ar);
+
+cleanup:
+ ath10k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
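Why every error path above collapses to "return ret ? 1 : 0" (a note on the mac80211 contract, not part of the patch):

/* mac80211 treats a .suspend return of 1 as "WoWLAN could not be
 * configured, fall back to a full teardown and restart", while 0 means
 * the device suspended with WoWLAN armed - hence the ret ? 1 : 0
 * mapping instead of propagating the raw errno. */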
+int ath10k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->fw_features))) {
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_hif_resume(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_wow_wakeup(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
+int ath10k_wow_init(struct ath10k *ar)
+{
+ if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+ return 0;
+
+ if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
+ return -EINVAL;
+
+ ar->wow.wowlan_support = ath10k_wowlan_support;
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
new file mode 100644
index 000000000000..abbb04b6d1bd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath10k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+#ifdef CONFIG_PM
+
+int ath10k_wow_init(struct ath10k *ar);
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath10k_wow_op_resume(struct ieee80211_hw *hw);
+
+#else
+
+static inline int ath10k_wow_init(struct ath10k *ar)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 7ca0d6f930fd..e22b0e778927 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1280,7 +1280,6 @@ struct ath5k_hw {
DECLARE_BITMAP(status, 4);
#define ATH_STAT_INVALID 0 /* disable hardware accesses */
-#define ATH_STAT_PROMISC 1
#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */
#define ATH_STAT_STARTED 3 /* opened & irqs enabled */
#define ATH_STAT_RESET 4 /* hw reset */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a6131825c9f6..23552f43d125 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2537,12 +2537,12 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
/* Initialize driver private data */
SET_IEEE80211_DEV(hw, ah->dev);
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SUPPORTS_RC_TABLE;
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
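The flag conversion above is mechanical; what the new helper does (mac80211 semantics, not introduced by this patch):

/* ieee80211_hw_set(hw, SIGNAL_DBM) sets IEEE80211_HW_SIGNAL_DBM in the
 * hw flags bitmap that replaced the old u32 hw->flags word, so each
 * "|=" line above becomes exactly one ieee80211_hw_set() call. */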
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index ca4b7ccd697f..803030fd17d3 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -124,7 +124,7 @@ ath5k_led_brightness_set(struct led_classdev *led_dev,
static int
ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
- const char *name, char *trigger)
+ const char *name, const char *trigger)
{
int err;
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 3b4a6463d87a..dc44cfef7517 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -369,7 +369,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *new_flags, u64 multicast)
{
#define SUPPORTED_FIF_FLAGS \
- (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \
+ (FIF_ALLMULTI | FIF_FCSFAIL | \
FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
FIF_BCN_PRBRESP_PROMISC)
@@ -393,16 +393,6 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
(AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
AR5K_RX_FILTER_MCAST);
- if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
- if (*new_flags & FIF_PROMISC_IN_BSS)
- __set_bit(ATH_STAT_PROMISC, ah->status);
- else
- __clear_bit(ATH_STAT_PROMISC, ah->status);
- }
-
- if (test_bit(ATH_STAT_PROMISC, ah->status))
- rfilt |= AR5K_RX_FILTER_PROM;
-
/* Note, AR5K_RX_FILTER_MCAST is already enabled */
if (*new_flags & FIF_ALLMULTI) {
mfilt[0] = ~0;
@@ -418,8 +408,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (ah->nvifs > 1))
rfilt |= AR5K_RX_FILTER_BEACON;
- /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
- * set we should only pass on control frames for this
+ /* FIF_CONTROL doc says we should only pass on control frames for this
* station. This needs testing. I believe right now this
* enables *all* control frames, which is OK.. but
* but we should see if we can improve on granularity */
@@ -809,7 +798,6 @@ const struct ieee80211_ops ath5k_hw_ops = {
.sw_scan_start = ath5k_sw_scan_start,
.sw_scan_complete = ath5k_sw_scan_complete,
.get_stats = ath5k_get_stats,
- /* .get_tkip_seq = not implemented */
/* .set_frag_threshold = not implemented */
/* .set_rts_threshold = not implemented */
/* .sta_add = not implemented */
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index cce4625a53ad..a511ef3614b9 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -889,7 +889,7 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
GFP_KERNEL);
} else if (vif->sme_state == SME_CONNECTED) {
cfg80211_disconnected(vif->ndev, proto_reason,
- NULL, 0, GFP_KERNEL);
+ NULL, 0, false, GFP_KERNEL);
}
vif->sme_state = SME_DISCONNECTED;
@@ -3467,7 +3467,7 @@ void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
GFP_KERNEL);
break;
case SME_CONNECTED:
- cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
+ cfg80211_disconnected(vif->ndev, 0, NULL, 0, true, GFP_KERNEL);
break;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 6c23d279525f..8f8793004b9f 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -254,86 +254,25 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
return 0;
}
-/**
- * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
- * @ah: atheros hardware structure
- * @chan:
- *
- * For non single-chip solutions. Converts to baseband spur frequency given the
- * input channel frequency and compute register settings below.
- */
-static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
- struct ath9k_channel *chan)
+void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan, int bin)
{
- int bb_spur = AR_NO_SPUR;
- int bin, cur_bin;
- int spur_freq_sd;
- int spur_delta_phase;
- int denominator;
+ int cur_bin;
int upper, lower, cur_vit_mask;
- int tmp, new;
int i;
- static int pilot_mask_reg[4] = {
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int8_t mask_amt;
+ int tmp_mask;
+ static const int pilot_mask_reg[4] = {
AR_PHY_TIMING7, AR_PHY_TIMING8,
AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
};
- static int chan_mask_reg[4] = {
+ static const int chan_mask_reg[4] = {
AR_PHY_TIMING9, AR_PHY_TIMING10,
AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
};
- static int inc[4] = { 0, 100, 0, 0 };
-
- int8_t mask_m[123];
- int8_t mask_p[123];
- int8_t mask_amt;
- int tmp_mask;
- int cur_bb_spur;
- bool is2GHz = IS_CHAN_2GHZ(chan);
-
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
- for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
- cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
- if (AR_NO_SPUR == cur_bb_spur)
- break;
- cur_bb_spur = cur_bb_spur - (chan->channel * 10);
- if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
- bb_spur = cur_bb_spur;
- break;
- }
- }
-
- if (AR_NO_SPUR == bb_spur)
- return;
-
- bin = bb_spur * 32;
-
- tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
- new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
- AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
- AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
- AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
-
- REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
-
- new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
- AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
- AR_PHY_SPUR_REG_MASK_RATE_SELECT |
- AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
- SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
- REG_WRITE(ah, AR_PHY_SPUR_REG, new);
-
- spur_delta_phase = ((bb_spur * 524288) / 100) &
- AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
- denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
- spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
-
- new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
- SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
- SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
- REG_WRITE(ah, AR_PHY_TIMING11, new);
+ static const int inc[4] = { 0, 100, 0, 0 };
cur_bin = -6000;
upper = bin + 100;
@@ -343,6 +282,7 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
int pilot_mask = 0;
int chan_mask = 0;
int bp = 0;
+
for (bp = 0; bp < 30; bp++) {
if ((cur_bin > lower) && (cur_bin < upper)) {
pilot_mask = pilot_mask | 0x1 << bp;
@@ -361,7 +301,6 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
for (i = 0; i < 123; i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);
@@ -467,6 +406,78 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
}
/**
+ * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
+ * @ah: atheros hardware structure
+ * @chan: the channel to operate on
+ *
+ * For non single-chip solutions. Converts to the baseband spur frequency
+ * given the input channel frequency and computes the register settings below.
+ */
+static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int bb_spur = AR_NO_SPUR;
+ int bin;
+ int spur_freq_sd;
+ int spur_delta_phase;
+ int denominator;
+ int tmp, new;
+ int i;
+
+ int8_t mask_m[123];
+ int8_t mask_p[123];
+ int cur_bb_spur;
+ bool is2GHz = IS_CHAN_2GHZ(chan);
+
+ memset(&mask_m, 0, sizeof(int8_t) * 123);
+ memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+ for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+ cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+ if (AR_NO_SPUR == cur_bb_spur)
+ break;
+ cur_bb_spur = cur_bb_spur - (chan->channel * 10);
+ if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
+ bb_spur = cur_bb_spur;
+ break;
+ }
+ }
+
+ if (AR_NO_SPUR == bb_spur)
+ return;
+
+ bin = bb_spur * 32;
+
+ tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+ new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+ AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+ AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+ AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+
+ REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
+
+ new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+ AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+ AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+ AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+ SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+ REG_WRITE(ah, AR_PHY_SPUR_REG, new);
+
+ spur_delta_phase = ((bb_spur * 524288) / 100) &
+ AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+ denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
+ spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
+
+ new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+ SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+ SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+ REG_WRITE(ah, AR_PHY_TIMING11, new);
+
+ ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
+}
+
+/**
* ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
* @ah: atheros hardware structure
*
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index fc08162b5820..db6624527d99 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -169,29 +169,17 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
{
int bb_spur = AR_NO_SPUR;
int freq;
- int bin, cur_bin;
+ int bin;
int bb_spur_off, spur_subchannel_sd;
int spur_freq_sd;
int spur_delta_phase;
int denominator;
- int upper, lower, cur_vit_mask;
int tmp, newVal;
int i;
- static const int pilot_mask_reg[4] = {
- AR_PHY_TIMING7, AR_PHY_TIMING8,
- AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
- };
- static const int chan_mask_reg[4] = {
- AR_PHY_TIMING9, AR_PHY_TIMING10,
- AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
- };
- static const int inc[4] = { 0, 100, 0, 0 };
struct chan_centers centers;
int8_t mask_m[123];
int8_t mask_p[123];
- int8_t mask_amt;
- int tmp_mask;
int cur_bb_spur;
bool is2GHz = IS_CHAN_2GHZ(chan);
@@ -288,135 +276,7 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
- cur_bin = -6000;
- upper = bin + 100;
- lower = bin - 100;
-
- for (i = 0; i < 4; i++) {
- int pilot_mask = 0;
- int chan_mask = 0;
- int bp = 0;
- for (bp = 0; bp < 30; bp++) {
- if ((cur_bin > lower) && (cur_bin < upper)) {
- pilot_mask = pilot_mask | 0x1 << bp;
- chan_mask = chan_mask | 0x1 << bp;
- }
- cur_bin += 100;
- }
- cur_bin += inc[i];
- REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
- REG_WRITE(ah, chan_mask_reg[i], chan_mask);
- }
-
- cur_vit_mask = 6100;
- upper = bin + 120;
- lower = bin - 120;
-
- for (i = 0; i < 123; i++) {
- if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
- /* workaround for gcc bug #37014 */
- volatile int tmp_v = abs(cur_vit_mask - bin);
-
- if (tmp_v < 75)
- mask_amt = 1;
- else
- mask_amt = 0;
- if (cur_vit_mask < 0)
- mask_m[abs(cur_vit_mask / 100)] = mask_amt;
- else
- mask_p[cur_vit_mask / 100] = mask_amt;
- }
- cur_vit_mask -= 100;
- }
-
- tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
- | (mask_m[48] << 26) | (mask_m[49] << 24)
- | (mask_m[50] << 22) | (mask_m[51] << 20)
- | (mask_m[52] << 18) | (mask_m[53] << 16)
- | (mask_m[54] << 14) | (mask_m[55] << 12)
- | (mask_m[56] << 10) | (mask_m[57] << 8)
- | (mask_m[58] << 6) | (mask_m[59] << 4)
- | (mask_m[60] << 2) | (mask_m[61] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
- tmp_mask = (mask_m[31] << 28)
- | (mask_m[32] << 26) | (mask_m[33] << 24)
- | (mask_m[34] << 22) | (mask_m[35] << 20)
- | (mask_m[36] << 18) | (mask_m[37] << 16)
- | (mask_m[48] << 14) | (mask_m[39] << 12)
- | (mask_m[40] << 10) | (mask_m[41] << 8)
- | (mask_m[42] << 6) | (mask_m[43] << 4)
- | (mask_m[44] << 2) | (mask_m[45] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
- tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
- | (mask_m[18] << 26) | (mask_m[18] << 24)
- | (mask_m[20] << 22) | (mask_m[20] << 20)
- | (mask_m[22] << 18) | (mask_m[22] << 16)
- | (mask_m[24] << 14) | (mask_m[24] << 12)
- | (mask_m[25] << 10) | (mask_m[26] << 8)
- | (mask_m[27] << 6) | (mask_m[28] << 4)
- | (mask_m[29] << 2) | (mask_m[30] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
- tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
- | (mask_m[2] << 26) | (mask_m[3] << 24)
- | (mask_m[4] << 22) | (mask_m[5] << 20)
- | (mask_m[6] << 18) | (mask_m[7] << 16)
- | (mask_m[8] << 14) | (mask_m[9] << 12)
- | (mask_m[10] << 10) | (mask_m[11] << 8)
- | (mask_m[12] << 6) | (mask_m[13] << 4)
- | (mask_m[14] << 2) | (mask_m[15] << 0);
- REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
- tmp_mask = (mask_p[15] << 28)
- | (mask_p[14] << 26) | (mask_p[13] << 24)
- | (mask_p[12] << 22) | (mask_p[11] << 20)
- | (mask_p[10] << 18) | (mask_p[9] << 16)
- | (mask_p[8] << 14) | (mask_p[7] << 12)
- | (mask_p[6] << 10) | (mask_p[5] << 8)
- | (mask_p[4] << 6) | (mask_p[3] << 4)
- | (mask_p[2] << 2) | (mask_p[1] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
- tmp_mask = (mask_p[30] << 28)
- | (mask_p[29] << 26) | (mask_p[28] << 24)
- | (mask_p[27] << 22) | (mask_p[26] << 20)
- | (mask_p[25] << 18) | (mask_p[24] << 16)
- | (mask_p[23] << 14) | (mask_p[22] << 12)
- | (mask_p[21] << 10) | (mask_p[20] << 8)
- | (mask_p[19] << 6) | (mask_p[18] << 4)
- | (mask_p[17] << 2) | (mask_p[16] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
- tmp_mask = (mask_p[45] << 28)
- | (mask_p[44] << 26) | (mask_p[43] << 24)
- | (mask_p[42] << 22) | (mask_p[41] << 20)
- | (mask_p[40] << 18) | (mask_p[39] << 16)
- | (mask_p[38] << 14) | (mask_p[37] << 12)
- | (mask_p[36] << 10) | (mask_p[35] << 8)
- | (mask_p[34] << 6) | (mask_p[33] << 4)
- | (mask_p[32] << 2) | (mask_p[31] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
- tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
- | (mask_p[59] << 26) | (mask_p[58] << 24)
- | (mask_p[57] << 22) | (mask_p[56] << 20)
- | (mask_p[55] << 18) | (mask_p[54] << 16)
- | (mask_p[53] << 14) | (mask_p[52] << 12)
- | (mask_p[51] << 10) | (mask_p[50] << 8)
- | (mask_p[49] << 6) | (mask_p[48] << 4)
- | (mask_p[47] << 2) | (mask_p[46] << 0);
- REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
- REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+ ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
REGWRITE_BUFFER_FLUSH(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 5cee231cca1f..a8762711ad74 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -15,6 +15,7 @@
*/
#include <linux/relay.h>
+#include <linux/random.h>
#include "ath9k.h"
static s8 fix_rssi_inv_only(u8 rssi_val)
@@ -36,21 +37,480 @@ static void ath_debug_send_fft_sample(struct ath_spec_scan_priv *spec_priv,
relay_write(spec_priv->rfs_chan_spec_scan, fft_sample_tlv, length);
}
+typedef int (ath_cmn_fft_idx_validator) (u8 *sample_end, int bytes_read);
+
+static int
+ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
+{
+ struct ath_ht20_mag_info *mag_info;
+ u8 *sample;
+ u16 max_magnitude;
+ u8 max_index;
+ u8 max_exp;
+
+ /* Sanity check so that we don't read outside the read
+ * buffer
+ */
+ if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN - 1)
+ return -1;
+
+ mag_info = (struct ath_ht20_mag_info *) (sample_end -
+ sizeof(struct ath_ht20_mag_info) + 1);
+
+ sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1;
+
+ max_index = spectral_max_index(mag_info->all_bins,
+ SPECTRAL_HT20_NUM_BINS);
+ max_magnitude = spectral_max_magnitude(mag_info->all_bins);
+
+ max_exp = mag_info->max_exp & 0xf;
+
+ /* Don't try to read something outside the read buffer
+ * in case of a missing byte (so bins[0] will be outside
+ * the read buffer)
+ */
+ if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN && max_index < 1)
+ return -1;
+
+ if (sample[max_index] != (max_magnitude >> max_exp))
+ return -1;
+ else
+ return 0;
+}
+
+static int
+ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
+{
+ struct ath_ht20_40_mag_info *mag_info;
+ u8 *sample;
+ u16 lower_mag, upper_mag;
+ u8 lower_max_index, upper_max_index;
+ u8 max_exp;
+ int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
+
+ /* Sanity check so that we don't read outside the read
+ * buffer
+ */
+ if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN - 1)
+ return -1;
+
+ mag_info = (struct ath_ht20_40_mag_info *) (sample_end -
+ sizeof(struct ath_ht20_40_mag_info) + 1);
+
+ sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1;
+
+ lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+ lower_max_index = spectral_max_index(mag_info->lower_bins,
+ SPECTRAL_HT20_40_NUM_BINS);
+
+ upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+ upper_max_index = spectral_max_index(mag_info->upper_bins,
+ SPECTRAL_HT20_40_NUM_BINS);
+
+ max_exp = mag_info->max_exp & 0xf;
+
+ /* Don't try to read something outside the read buffer
+ * in case of a missing byte (so bins[0] will be outside
+ * the read buffer)
+ */
+ if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN &&
+ ((upper_max_index < 1) || (lower_max_index < 1)))
+ return -1;
+
+ /* Sometimes the hardware messes up the index and adds
+ * the index of the middle point (dc_pos). Try to fix it.
+ */
+ if ((upper_max_index - dc_pos > 0) &&
+ (sample[upper_max_index] == (upper_mag >> max_exp)))
+ upper_max_index -= dc_pos;
+
+ if ((lower_max_index - dc_pos > 0) &&
+ (sample[lower_max_index - dc_pos] == (lower_mag >> max_exp)))
+ lower_max_index -= dc_pos;
+
+ if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) ||
+ (sample[lower_max_index] != (lower_mag >> max_exp)))
+ return -1;
+ else
+ return 0;
+}
+
+typedef int (ath_cmn_fft_sample_handler) (struct ath_rx_status *rs,
+ struct ath_spec_scan_priv *spec_priv,
+ u8 *sample_buf, u64 tsf, u16 freq, int chan_type);
+
+static int
+ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
+ struct ath_spec_scan_priv *spec_priv,
+ u8 *sample_buf,
+ u64 tsf, u16 freq, int chan_type)
+{
+ struct fft_sample_ht20 fft_sample_20;
+ struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+ struct ath_hw *ah = spec_priv->ah;
+ struct ath_ht20_mag_info *mag_info;
+ struct fft_sample_tlv *tlv;
+ int i = 0;
+ int ret = 0;
+ int dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
+ u16 magnitude, tmp_mag, length;
+ u8 max_index, bitmap_w, max_exp;
+
+ length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+ fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+ fft_sample_20.tlv.length = __cpu_to_be16(length);
+ fft_sample_20.freq = __cpu_to_be16(freq);
+ fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ fft_sample_20.noise = ah->noise;
+
+ mag_info = (struct ath_ht20_mag_info *) (sample_buf +
+ SPECTRAL_HT20_NUM_BINS);
+
+ magnitude = spectral_max_magnitude(mag_info->all_bins);
+ fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+
+ max_index = spectral_max_index(mag_info->all_bins,
+ SPECTRAL_HT20_NUM_BINS);
+ fft_sample_20.max_index = max_index;
+
+ bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+ fft_sample_20.bitmap_weight = bitmap_w;
+
+ max_exp = mag_info->max_exp & 0xf;
+ fft_sample_20.max_exp = max_exp;
+
+ fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+ memcpy(fft_sample_20.data, sample_buf, SPECTRAL_HT20_NUM_BINS);
+
+ ath_dbg(common, SPECTRAL_SCAN, "FFT HT20 frame: max mag 0x%X,"
+ "max_mag_idx %i\n",
+ magnitude >> max_exp,
+ max_index);
+
+ if (fft_sample_20.data[max_index] != (magnitude >> max_exp)) {
+ ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
+ ret = -1;
+ }
+
+ /* The DC value (the value in the middle) is the blind spot of the
+ * spectral sample and is invalid; interpolate it.
+ */
+ fft_sample_20.data[dc_pos] = (fft_sample_20.data[dc_pos + 1] +
+ fft_sample_20.data[dc_pos - 1]) / 2;
+
+ /* Check that the maximum magnitude is indeed the maximum;
+ * also, if the maximum value was at dc_pos, calculate
+ * a new one (since the value at dc_pos is invalid).
+ */
+ if (max_index == dc_pos) {
+ tmp_mag = 0;
+ for (i = 0; i < dc_pos; i++) {
+ if (fft_sample_20.data[i] > tmp_mag) {
+ tmp_mag = fft_sample_20.data[i];
+ fft_sample_20.max_index = i;
+ }
+ }
+
+ magnitude = tmp_mag << max_exp;
+ fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Calculated new lower max 0x%X at %i\n",
+ tmp_mag, fft_sample_20.max_index);
+ } else
+ for (i = 0; i < SPECTRAL_HT20_NUM_BINS; i++) {
+ if (fft_sample_20.data[i] == (magnitude >> max_exp))
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got max: 0x%X at index %i\n",
+ fft_sample_20.data[i], i);
+
+ if (fft_sample_20.data[i] > (magnitude >> max_exp)) {
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got bin %i greater than max: 0x%X\n",
+ i, fft_sample_20.data[i]);
+ ret = -1;
+ }
+ }
+
+ if (ret < 0)
+ return ret;
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_20;
+
+ ath_debug_send_fft_sample(spec_priv, tlv);
+
+ return 0;
+}
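+
+/* DC interpolation sketch (assuming SPECTRAL_HT20_NUM_BINS = 56,
+ * so dc_pos = 28): the bin at DC cannot be measured, so data[28]
+ * is replaced with (data[27] + data[29]) / 2; e.g. neighbours
+ * 0x20 and 0x26 yield an interpolated value of 0x23.
+ */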
+
+static int
+ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
+ struct ath_spec_scan_priv *spec_priv,
+ u8 *sample_buf,
+ u64 tsf, u16 freq, int chan_type)
+{
+ struct fft_sample_ht20_40 fft_sample_40;
+ struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+ struct ath_hw *ah = spec_priv->ah;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_ht20_40_mag_info *mag_info;
+ struct fft_sample_tlv *tlv;
+ int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
+ int i = 0;
+ int ret = 0;
+ s16 ext_nf;
+ u16 lower_mag, upper_mag, tmp_mag, length;
+ s8 lower_rssi, upper_rssi;
+ u8 lower_max_index, upper_max_index;
+ u8 lower_bitmap_w, upper_bitmap_w, max_exp;
+
+ if (caldata)
+ ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+ caldata->nfCalHist[3].privNF);
+ else
+ ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+ length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+ fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+ fft_sample_40.tlv.length = __cpu_to_be16(length);
+ fft_sample_40.freq = __cpu_to_be16(freq);
+ fft_sample_40.channel_type = chan_type;
+
+ if (chan_type == NL80211_CHAN_HT40PLUS) {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+
+ fft_sample_40.lower_noise = ah->noise;
+ fft_sample_40.upper_noise = ext_nf;
+ } else {
+ lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+ upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+
+ fft_sample_40.lower_noise = ext_nf;
+ fft_sample_40.upper_noise = ah->noise;
+ }
+
+ fft_sample_40.lower_rssi = lower_rssi;
+ fft_sample_40.upper_rssi = upper_rssi;
+
+ mag_info = (struct ath_ht20_40_mag_info *) (sample_buf +
+ SPECTRAL_HT20_40_NUM_BINS);
+
+ lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+ fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+
+ upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+ fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+
+ lower_max_index = spectral_max_index(mag_info->lower_bins,
+ SPECTRAL_HT20_40_NUM_BINS);
+ fft_sample_40.lower_max_index = lower_max_index;
+
+ upper_max_index = spectral_max_index(mag_info->upper_bins,
+ SPECTRAL_HT20_40_NUM_BINS);
+ fft_sample_40.upper_max_index = upper_max_index;
+
+ lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+ fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+
+ upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+ fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+
+ max_exp = mag_info->max_exp & 0xf;
+ fft_sample_40.max_exp = max_exp;
+
+ fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+ memcpy(fft_sample_40.data, sample_buf, SPECTRAL_HT20_40_NUM_BINS);
+
+ ath_dbg(common, SPECTRAL_SCAN, "FFT HT20/40 frame: lower mag 0x%X,"
+ "lower_mag_idx %i, upper mag 0x%X,"
+ "upper_mag_idx %i\n",
+ lower_mag >> max_exp,
+ lower_max_index,
+ upper_mag >> max_exp,
+ upper_max_index);
+
+ /* Sometimes the hardware messes up the index by adding
+ * the index of the middle point (dc_pos). Try to fix it.
+ */
+ if ((upper_max_index - dc_pos > 0) &&
+ (fft_sample_40.data[upper_max_index] == (upper_mag >> max_exp))) {
+ upper_max_index -= dc_pos;
+ fft_sample_40.upper_max_index = upper_max_index;
+ }
+
+ if ((lower_max_index - dc_pos > 0) &&
+ (fft_sample_40.data[lower_max_index - dc_pos] ==
+ (lower_mag >> max_exp))) {
+ lower_max_index -= dc_pos;
+ fft_sample_40.lower_max_index = lower_max_index;
+ }
+
+ /* Check if we got the expected magnitude values at
+ * the expected bins
+ */
+ if ((fft_sample_40.data[upper_max_index + dc_pos]
+ != (upper_mag >> max_exp)) ||
+ (fft_sample_40.data[lower_max_index]
+ != (lower_mag >> max_exp))) {
+ ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
+ ret = -1;
+ }
+
+ /* The DC value (the value in the middle) is the blind spot of the
+ * spectral sample and is invalid; interpolate it.
+ */
+ fft_sample_40.data[dc_pos] = (fft_sample_40.data[dc_pos + 1] +
+ fft_sample_40.data[dc_pos - 1]) / 2;
+
+ /* Check that the maximum magnitudes are indeed the maxima;
+ * also, if the maximum value was at dc_pos, calculate
+ * a new one (since the value at dc_pos is invalid).
+ */
+ if (lower_max_index == dc_pos) {
+ tmp_mag = 0;
+ for (i = 0; i < dc_pos; i++) {
+ if (fft_sample_40.data[i] > tmp_mag) {
+ tmp_mag = fft_sample_40.data[i];
+ fft_sample_40.lower_max_index = i;
+ }
+ }
+
+ lower_mag = tmp_mag << max_exp;
+ fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Calculated new lower max 0x%X at %i\n",
+ tmp_mag, fft_sample_40.lower_max_index);
+ } else
+ for (i = 0; i < dc_pos; i++) {
+ if (fft_sample_40.data[i] == (lower_mag >> max_exp))
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got lower mag: 0x%X at index %i\n",
+ fft_sample_40.data[i], i);
+
+ if (fft_sample_40.data[i] > (lower_mag >> max_exp)) {
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got lower bin %i higher than max: 0x%X\n",
+ i, fft_sample_40.data[i]);
+ ret = -1;
+ }
+ }
+
+ if (upper_max_index == dc_pos) {
+ tmp_mag = 0;
+ for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
+ if (fft_sample_40.data[i] > tmp_mag) {
+ tmp_mag = fft_sample_40.data[i];
+ fft_sample_40.upper_max_index = i;
+ }
+ }
+ upper_mag = tmp_mag << max_exp;
+ fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Calculated new upper max 0x%X at %i\n",
+ tmp_mag, fft_sample_40.upper_max_index);
+ } else
+ for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
+ if (fft_sample_40.data[i] == (upper_mag >> max_exp))
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got upper mag: 0x%X at index %i\n",
+ fft_sample_40.data[i], i);
+
+ if (fft_sample_40.data[i] > (upper_mag >> max_exp)) {
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Got upper bin %i higher than max: 0x%X\n",
+ i, fft_sample_40.data[i]);
+
+ ret = -1;
+ }
+ }
+
+ if (ret < 0)
+ return ret;
+
+ tlv = (struct fft_sample_tlv *)&fft_sample_40;
+
+ ath_debug_send_fft_sample(spec_priv, tlv);
+
+ return 0;
+}
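+
+/* Note on the noise/rssi mapping above (sketch, matching the code):
+ * for HT40PLUS the control channel occupies the lower half of the
+ * 40 MHz sample, so the ctl RSSI and ah->noise describe the lower
+ * bins while the extension RSSI and ext_nf describe the upper ones;
+ * for HT40MINUS the mapping is mirrored.
+ */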
+
+static inline void
+ath_cmn_copy_fft_frame(u8 *in, u8 *out, int sample_len, int sample_bytes)
+{
+ switch (sample_bytes - sample_len) {
+ case -1:
+ /* First byte missing */
+ memcpy(&out[1], in,
+ sample_len - 1);
+ break;
+ case 0:
+ /* Length correct, nothing to do. */
+ memcpy(out, in, sample_len);
+ break;
+ case 1:
+ /* MAC added 2 extra bytes AND first byte
+ * is missing.
+ */
+ memcpy(&out[1], in, 30);
+ out[31] = in[31];
+ memcpy(&out[32], &in[33],
+ sample_len - 32);
+ break;
+ case 2:
+ /* MAC added 2 extra bytes at bin 30 and 32,
+ * remove them.
+ */
+ memcpy(out, in, 30);
+ out[30] = in[31];
+ memcpy(&out[31], &in[33],
+ sample_len - 31);
+ break;
+ default:
+ break;
+ }
+}
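+
+/* Byte-layout sketch for case 2 above (illustrative): with the two
+ * MAC filler bytes X and Y, the input stream looks like
+ * in[0..29] = b0..b29, in[30] = X, in[31] = b30, in[32] = Y,
+ * in[33..] = b31..., so the copy keeps b0..b29, takes b30 from
+ * in[31] and resumes at in[33]. Case 1 applies the same fix while
+ * also shifting the output right by one byte to leave room for the
+ * missing first byte.
+ */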
+
+static int
+ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv)
+{
+ int i = 0;
+ int ret = 0;
+ struct rchan *rc = spec_priv->rfs_chan_spec_scan;
+
+ for_each_online_cpu(i)
+ ret += relay_buf_full(rc->buf[i]);
+
+ i = num_online_cpus();
+
+ if (ret == i)
+ return 1;
+ else
+ return 0;
+}
+
/* returns 1 if this was a spectral frame, even if not handled. */
int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
struct ath_rx_status *rs, u64 tsf)
{
+ u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0};
struct ath_hw *ah = spec_priv->ah;
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
- u8 num_bins, *bins, *vdata = (u8 *)hdr;
- struct fft_sample_ht20 fft_sample_20;
- struct fft_sample_ht20_40 fft_sample_40;
- struct fft_sample_tlv *tlv;
+ u8 num_bins, *vdata = (u8 *)hdr;
struct ath_radar_info *radar_info;
int len = rs->rs_datalen;
- int dc_pos;
- u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+ int i;
+ int got_slen = 0;
+ u8 *sample_start;
+ int sample_bytes = 0;
+ int ret = 0;
+ u16 fft_len, sample_len, freq = ah->curchan->chan->center_freq;
enum nl80211_channel_type chan_type;
+ ath_cmn_fft_idx_validator *fft_idx_validator;
+ ath_cmn_fft_sample_handler *fft_handler;
/* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
* via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -68,140 +528,170 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
+ /* Output buffers are full, no need to process anything
+ * since there is no space to put the result anyway
+ */
+ ret = ath_cmn_is_fft_buf_full(spec_priv);
+ if (ret == 1) {
+ ath_dbg(common, SPECTRAL_SCAN, "FFT report ignored, no space "
+ "left on output buffers\n");
+ return 1;
+ }
+
chan_type = cfg80211_get_chandef_type(&common->hw->conf.chandef);
if ((chan_type == NL80211_CHAN_HT40MINUS) ||
(chan_type == NL80211_CHAN_HT40PLUS)) {
fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+ sample_len = SPECTRAL_HT20_40_SAMPLE_LEN;
num_bins = SPECTRAL_HT20_40_NUM_BINS;
- bins = (u8 *)fft_sample_40.data;
+ fft_idx_validator = &ath_cmn_max_idx_verify_ht20_40_fft;
+ fft_handler = &ath_cmn_process_ht20_40_fft;
} else {
fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+ sample_len = SPECTRAL_HT20_SAMPLE_LEN;
num_bins = SPECTRAL_HT20_NUM_BINS;
- bins = (u8 *)fft_sample_20.data;
- }
-
- /* Variation in the data length is possible and will be fixed later */
- if ((len > fft_len + 2) || (len < fft_len - 1))
- return 1;
-
- switch (len - fft_len) {
- case 0:
- /* length correct, nothing to do. */
- memcpy(bins, vdata, num_bins);
- break;
- case -1:
- /* first byte missing, duplicate it. */
- memcpy(&bins[1], vdata, num_bins - 1);
- bins[0] = vdata[0];
- break;
- case 2:
- /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
- memcpy(bins, vdata, 30);
- bins[30] = vdata[31];
- memcpy(&bins[31], &vdata[33], num_bins - 31);
- break;
- case 1:
- /* MAC added 2 extra bytes AND first byte is missing. */
- bins[0] = vdata[0];
- memcpy(&bins[1], vdata, 30);
- bins[31] = vdata[31];
- memcpy(&bins[32], &vdata[33], num_bins - 32);
- break;
- default:
- return 1;
+ fft_idx_validator = &ath_cmn_max_idx_verify_ht20_fft;
+ fft_handler = &ath_cmn_process_ht20_fft;
}
- /* DC value (value in the middle) is the blind spot of the spectral
- * sample and invalid, interpolate it.
- */
- dc_pos = num_bins / 2;
- bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
-
- if ((chan_type == NL80211_CHAN_HT40MINUS) ||
- (chan_type == NL80211_CHAN_HT40PLUS)) {
- s8 lower_rssi, upper_rssi;
- s16 ext_nf;
- u8 lower_max_index, upper_max_index;
- u8 lower_bitmap_w, upper_bitmap_w;
- u16 lower_mag, upper_mag;
- struct ath9k_hw_cal_data *caldata = ah->caldata;
- struct ath_ht20_40_mag_info *mag_info;
-
- if (caldata)
- ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
- caldata->nfCalHist[3].privNF);
- else
- ext_nf = ATH_DEFAULT_NOISE_FLOOR;
-
- length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
- fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
- fft_sample_40.tlv.length = __cpu_to_be16(length);
- fft_sample_40.freq = __cpu_to_be16(freq);
- fft_sample_40.channel_type = chan_type;
-
- if (chan_type == NL80211_CHAN_HT40PLUS) {
- lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
- upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
-
- fft_sample_40.lower_noise = ah->noise;
- fft_sample_40.upper_noise = ext_nf;
- } else {
- lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
- upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
-
- fft_sample_40.lower_noise = ext_nf;
- fft_sample_40.upper_noise = ah->noise;
+ ath_dbg(common, SPECTRAL_SCAN, "Got radar dump bw_info: 0x%X,"
+ "len: %i fft_len: %i\n",
+ radar_info->pulse_bw_info,
+ len,
+ fft_len);
+ sample_start = vdata;
+ for (i = 0; i < len - 2; i++) {
+ sample_bytes++;
+
+ /* Only a single sample received; no need to look
+ * for the sample's end, do the correction based
+ * on the packet's length instead. Note that the hw
+ * will always put the radar_info structure at
+ * the end.
+ */
+ if (len <= fft_len + 2) {
+ sample_bytes = len - sizeof(struct ath_radar_info);
+ got_slen = 1;
}
- fft_sample_40.lower_rssi = lower_rssi;
- fft_sample_40.upper_rssi = upper_rssi;
-
- mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
- lower_mag = spectral_max_magnitude(mag_info->lower_bins);
- upper_mag = spectral_max_magnitude(mag_info->upper_bins);
- fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
- fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
- lower_max_index = spectral_max_index(mag_info->lower_bins);
- upper_max_index = spectral_max_index(mag_info->upper_bins);
- fft_sample_40.lower_max_index = lower_max_index;
- fft_sample_40.upper_max_index = upper_max_index;
- lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
- upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
- fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
- fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
- fft_sample_40.max_exp = mag_info->max_exp & 0xf;
- fft_sample_40.tsf = __cpu_to_be64(tsf);
-
- tlv = (struct fft_sample_tlv *)&fft_sample_40;
- } else {
- u8 max_index, bitmap_w;
- u16 magnitude;
- struct ath_ht20_mag_info *mag_info;
-
- length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
- fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
- fft_sample_20.tlv.length = __cpu_to_be16(length);
- fft_sample_20.freq = __cpu_to_be16(freq);
-
- fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
- fft_sample_20.noise = ah->noise;
-
- mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
- magnitude = spectral_max_magnitude(mag_info->all_bins);
- fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
- max_index = spectral_max_index(mag_info->all_bins);
- fft_sample_20.max_index = max_index;
- bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
- fft_sample_20.bitmap_weight = bitmap_w;
- fft_sample_20.max_exp = mag_info->max_exp & 0xf;
-
- fft_sample_20.tsf = __cpu_to_be64(tsf);
+ /* Search for the end of the FFT frame between
+ * sample_len - 1 and sample_len + 2. max_exp is 3
+ * bits long and is the only value on the last
+ * byte of the frame, so since it'll be smaller than
+ * the next byte (the first bin of the next sample)
+ * 90% of the time, we can use it as a separator.
+ */
+ if (vdata[i] <= 0x7 && sample_bytes >= sample_len - 1) {
+
+ /* Got a frame length within boundaries, there are
+ * four scenarios here:
+ *
+ * a) sample_len -> We got the correct length
+ * b) sample_len + 2 -> 2 bytes added around bin[31]
+ * c) sample_len - 1 -> The first byte is missing
+ * d) sample_len + 1 -> b + c at the same time
+ *
+ * When MAC adds 2 extra bytes, bin[31] and bin[32]
+ * have the same value, so we can use that for further
+ * verification in cases b and d.
+ */
+
+ /* Did we go too far? If so, we couldn't determine
+ * this sample's boundaries; discard any further
+ * data
+ */
+ if ((sample_bytes > sample_len + 2) ||
+ ((sample_bytes > sample_len) &&
+ (sample_start[31] != sample_start[32])))
+ break;
+
+ /* See if we got a valid frame by checking the
+ * consistency of the mag_info fields. This is to
+ * avoid "fixing" an already-correct frame.
+ * Failure is non-fatal; later frames may
+ * be valid.
+ */
+ if (!fft_idx_validator(&vdata[i], i)) {
+ ath_dbg(common, SPECTRAL_SCAN,
+ "Found valid fft frame at %i\n", i);
+ got_slen = 1;
+ }
+
+ /* We expect 1 - 2 more bytes */
+ else if ((sample_start[31] == sample_start[32]) &&
+ (sample_bytes >= sample_len) &&
+ (sample_bytes < sample_len + 2) &&
+ (vdata[i + 1] <= 0x7))
+ continue;
+
+ /* Try to distinguish cases a and c */
+ else if ((sample_bytes == sample_len - 1) &&
+ (vdata[i + 1] <= 0x7))
+ continue;
+
+ got_slen = 1;
+ }
- tlv = (struct fft_sample_tlv *)&fft_sample_20;
+ if (got_slen) {
+ ath_dbg(common, SPECTRAL_SCAN, "FFT frame len: %i\n",
+ sample_bytes);
+
+ /* Only try to fix a frame if it's the only one
+ * on the report, else just skip it.
+ */
+ if (sample_bytes != sample_len && len <= fft_len + 2) {
+ ath_cmn_copy_fft_frame(sample_start,
+ sample_buf, sample_len,
+ sample_bytes);
+
+ fft_handler(rs, spec_priv, sample_buf,
+ tsf, freq, chan_type);
+
+ /* Mix the received bins into the /dev/random
+ * pool before the buffer is cleared
+ */
+ add_device_randomness(sample_buf, num_bins);
+
+ memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
+ }
+
+ /* Process a normal frame */
+ if (sample_bytes == sample_len) {
+ ret = fft_handler(rs, spec_priv, sample_start,
+ tsf, freq, chan_type);
+
+ /* Mix the received bins into the /dev/random
+ * pool
+ */
+ add_device_randomness(sample_start, num_bins);
+ }
+
+ /* Short report processed, break out of the
+ * loop.
+ */
+ if (len <= fft_len + 2)
+ break;
+
+ sample_start = &vdata[i + 1];
+
+ /* -1 to grab sample_len - 1, -2 since
+ * they'll get increased by one. In case
+ * of failure try to recover by going byte
+ * by byte instead.
+ */
+ if (ret == 0) {
+ i += num_bins - 2;
+ sample_bytes = num_bins - 2;
+ }
+ got_slen = 0;
+ }
}
- ath_debug_send_fft_sample(spec_priv, tlv);
-
+ i -= num_bins - 2;
+ if (len - i != sizeof(struct ath_radar_info))
+ ath_dbg(common, SPECTRAL_SCAN, "FFT report truncated"
+ "(bytes left: %i)\n",
+ len - i);
return 1;
}
EXPORT_SYMBOL(ath_cmn_process_fft);
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
index 82d9dd29652c..998743be9c67 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.h
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -66,6 +66,8 @@ struct ath_ht20_fft_packet {
} __packed;
#define SPECTRAL_HT20_TOTAL_DATA_LEN (sizeof(struct ath_ht20_fft_packet))
+#define SPECTRAL_HT20_SAMPLE_LEN (sizeof(struct ath_ht20_mag_info) +\
+ SPECTRAL_HT20_NUM_BINS)
/* Dynamic 20/40 mode:
*
@@ -101,6 +103,10 @@ struct ath_spec_scan_priv {
};
#define SPECTRAL_HT20_40_TOTAL_DATA_LEN (sizeof(struct ath_ht20_40_fft_packet))
+#define SPECTRAL_HT20_40_SAMPLE_LEN (sizeof(struct ath_ht20_40_mag_info) +\
+ SPECTRAL_HT20_40_NUM_BINS)
+
+#define SPECTRAL_SAMPLE_MAX_LEN SPECTRAL_HT20_40_SAMPLE_LEN
/* grabs the max magnitude from the all/upper/lower bins */
static inline u16 spectral_max_magnitude(u8 *bins)
@@ -111,17 +117,32 @@ static inline u16 spectral_max_magnitude(u8 *bins)
}
/* return the max index from the all/upper/lower bins */
-static inline u8 spectral_max_index(u8 *bins)
+static inline u8 spectral_max_index(u8 *bins, int num_bins)
{
s8 m = (bins[2] & 0xfc) >> 2;
-
- /* TODO: this still doesn't always report the right values ... */
- if (m > 32)
+ u8 zero_idx = num_bins / 2;
+
+ /* The raw value is a 6 bit two's complement number;
+ * sign-extend it so the 8 bit int carries the
+ * correct sign
+ */
+ if (m & 0x20) {
+ m &= ~0x20;
m |= 0xe0;
- else
- m &= ~0xe0;
+ }
+
+ /* Bring the zero point to the beginning
+ * instead of the middle so that we can use
+ * it for array lookup and don't have to deal
+ * with negative values later
+ */
+ m += zero_idx;
+
+ /* Sanity check to make sure index is within bounds */
+ if (m < 0 || m > num_bins - 1)
+ m = 0;
- return m + 29;
+ return m;
}
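+
+/* Worked example (hypothetical raw value): bins[2] = 0xF4 gives
+ * m = (0xF4 & 0xfc) >> 2 = 0x3D; bit 5 is set, so after sign
+ * extension m = 0xFD = -3. With num_bins = 56 the zero point is
+ * 28 and the returned index is -3 + 28 = 25.
+ */
+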
/* return the bitmap weight from the all/upper/lower bins */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index e82a0d4ce23f..5dbc617ecf8a 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -440,9 +440,9 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
-#define OP_BT_PRIORITY_DETECTED BIT(3)
-#define OP_BT_SCAN BIT(4)
-#define OP_TSF_RESET BIT(6)
+#define OP_BT_PRIORITY_DETECTED 3
+#define OP_BT_SCAN 4
+#define OP_TSF_RESET 6
enum htc_op_flags {
HTC_FWFLAG_NO_RMW,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index d7beefe60683..36218ee2bdbe 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -594,7 +594,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
priv->spec_priv.ah = priv->ah;
priv->spec_priv.spec_config.enabled = 0;
- priv->spec_priv.spec_config.short_repeat = false;
+ priv->spec_priv.spec_config.short_repeat = true;
priv->spec_priv.spec_config.count = 8;
priv->spec_priv.spec_config.endless = false;
priv->spec_priv.spec_config.period = 0x12;
@@ -717,18 +717,18 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
struct ath_common *common = ath9k_hw_common(priv->ah);
struct base_eep_header *pBase;
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_HAS_RATE_CONTROL |
- IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
if (ath9k_ps_enable)
- hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+ ieee80211_hw_set(hw, SUPPORTS_PS);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 564923c0df87..b71f3072fd9a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1238,8 +1238,7 @@ out:
}
#define SUPPORTED_FILTERS \
- (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+ (FIF_ALLMULTI | \
FIF_CONTROL | \
FIF_PSPOLL | \
FIF_OTHER_BSS | \
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index a0f58e2aa553..cc9648f844ae 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -872,14 +872,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
if (priv->rxfilter & FIF_PROBE_REQ)
rfilt |= ATH9K_RX_FILTER_PROBEREQ;
- /*
- * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
- * mode interface or when in monitor mode. AP mode does not need this
- * since it receives all in-BSS frames anyway.
- */
- if (((ah->opmode != NL80211_IFTYPE_AP) &&
- (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
- ah->is_monitoring)
+ if (ah->is_monitoring)
rfilt |= ATH9K_RX_FILTER_PROM;
if (priv->rxfilter & FIF_CONTROL)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index c1d2d0340feb..e8454db17634 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1119,6 +1119,8 @@ bool ar9003_is_paprd_enabled(struct ath_hw *ah);
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
struct ath9k_channel *chan);
+void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
+ struct ath9k_channel *chan, int bin);
void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
struct ath9k_channel *chan, int ht40_delta);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f8d11efa7b0f..eff0e5325e6a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -796,7 +796,7 @@ static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
if (!ath9k_is_chanctx_enabled())
return;
- hw->flags |= IEEE80211_HW_QUEUE_CONTROL;
+ ieee80211_hw_set(hw, QUEUE_CONTROL);
hw->queues = ATH9K_NUM_TX_QUEUES;
hw->offchannel_tx_hw_queue = hw->queues - 1;
hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
@@ -818,20 +818,20 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SUPPORTS_RC_TABLE |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
if (ath9k_ps_enable)
- hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+ ieee80211_hw_set(hw, SUPPORTS_PS);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
if (AR_SREV_9280_20_OR_LATER(ah))
hw->radiotap_mcs_details |=
@@ -839,7 +839,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
}
if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b0badef71ce7..d285e3a89853 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1442,8 +1442,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
#define SUPPORTED_FILTERS \
- (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+ (FIF_ALLMULTI | \
FIF_CONTROL | \
FIF_PSPOLL | \
FIF_OTHER_BSS | \
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6fb40ef86fd6..6c75fb1ab77d 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -392,11 +392,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
if (sc->cur_chan->rxfilter & FIF_PROBE_REQ)
rfilt |= ATH9K_RX_FILTER_PROBEREQ;
- /*
- * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
- * mode interface or when in monitor mode. AP mode does not need this
- * since it receives all in-BSS frames anyway.
- */
if (sc->sc_ah->is_monitoring)
rfilt |= ATH9K_RX_FILTER_PROM;
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 47d5c2e910ad..88045f93a76c 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -286,7 +286,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
}
if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM))
- ar->hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
if (!SUPP(CARL9170FW_USB_INIT_FIRMWARE)) {
dev_err(&ar->udev->dev, "firmware does not provide "
@@ -310,8 +310,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (SUPP(CARL9170FW_RX_FILTER)) {
ar->fw.rx_filter = true;
ar->rx_filter_caps = FIF_FCSFAIL | FIF_PLCPFAIL |
- FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS |
- FIF_PROMISC_IN_BSS;
+ FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS;
}
if (SUPP(CARL9170FW_HW_COUNTERS))
diff --git a/drivers/net/wireless/ath/carl9170/led.c b/drivers/net/wireless/ath/carl9170/led.c
index 78dadc797558..2c74425f5059 100644
--- a/drivers/net/wireless/ath/carl9170/led.c
+++ b/drivers/net/wireless/ath/carl9170/led.c
@@ -122,7 +122,7 @@ static void carl9170_led_set_brightness(struct led_classdev *led,
}
static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
- char *trigger)
+ const char *trigger)
{
int err;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index f1455a04cb62..170c209f99b8 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1011,9 +1011,8 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
if (multicast != ar->cur_mc_hash)
WARN_ON(carl9170_update_multicast(ar, multicast));
- if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
- ar->sniffer_enabled = !!(*new_flags &
- (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
+ if (changed_flags & FIF_OTHER_BSS) {
+ ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);
WARN_ON(carl9170_set_operating_mode(ar));
}
@@ -1033,7 +1032,7 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
if (!(*new_flags & FIF_PSPOLL))
rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
- if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
+ if (!(*new_flags & FIF_OTHER_BSS)) {
rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
}
@@ -1845,22 +1844,22 @@ void *carl9170_alloc(size_t priv_size)
/* firmware decides which modes we support */
hw->wiphy->interface_modes = 0;
- hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
- IEEE80211_HW_SUPPORTS_RC_TABLE |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
if (!modparam_noht) {
/*
* see the comment above, why we allow the user
* to disable HT by a module parameter.
*/
- hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
}
hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index c9f93310c0d6..76842e6ca38e 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -651,6 +651,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
unsigned int plen, void *payload, unsigned int outlen, void *out)
{
int err = -ENOMEM;
+ unsigned long time_left;
if (!IS_ACCEPTING_CMD(ar))
return -EIO;
@@ -672,8 +673,8 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
err = __carl9170_exec_cmd(ar, &ar->cmd, false);
if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
- err = wait_for_completion_timeout(&ar->cmd_wait, HZ);
- if (err == 0) {
+ time_left = wait_for_completion_timeout(&ar->cmd_wait, HZ);
+ if (time_left == 0) {
err = -ETIMEDOUT;
goto err_unbuf;
}
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index c657ca26a71a..656ce42b339a 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -41,30 +41,31 @@ struct radar_types {
/* percentage on ppb threshold to trigger detection */
#define MIN_PPB_THRESH 50
-#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
+#define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
+#define PPB_THRESH(PPB) PPB_THRESH_RATE(PPB, MIN_PPB_THRESH)
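+/* e.g. PPB_THRESH(18) expands to PPB_THRESH_RATE(18, 50)
+ * = (18 * 50 + 100 - 50) / 100 = 9, i.e. roughly half of the
+ * 18 pulses per burst must be observed to trigger detection.
+ */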
#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
/* percentage of pulse width tolerance */
#define WIDTH_TOLERANCE 5
#define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
#define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
-#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
+#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
(PRF2PRI(PMAX) - PRI_TOLERANCE), \
(PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
- PPB_THRESH(PPB), PRI_TOLERANCE, \
+ PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
}
/* radar types as defined by ETSI EN-301-893 v1.5.1 */
static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
- ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18),
- ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10),
- ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15),
- ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25),
- ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
- ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10),
- ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15),
+ ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18, false),
+ ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10, false),
+ ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15, false),
+ ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25, false),
+ ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20, false),
+ ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10, false),
+ ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15, false),
};
static const struct radar_types etsi_radar_types_v15 = {
@@ -73,21 +74,30 @@ static const struct radar_types etsi_radar_types_v15 = {
.radar_types = etsi_radar_ref_types_v15,
};
-#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
+#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP) \
{ \
ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
PMIN - PRI_TOLERANCE, \
PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
- PPB_THRESH(PPB), PRI_TOLERANCE, \
+ PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP \
}
+/* radar types released on August 14, 2014
+ * type 1 PRI values are randomly selected within the range of 518 and 3066.
+ * dividing them into 3 groups is good enough for both radar detection and
+ * avoiding false detection, based on practical test results
+ * collected for more than a year.
+ */
static const struct radar_detector_specs fcc_radar_ref_types[] = {
- FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
- FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
- FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
- FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
- FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
- FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
+ FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18, false),
+ FCC_PATTERN(101, 0, 1, 518, 938, 1, 57, false),
+ FCC_PATTERN(102, 0, 1, 938, 2000, 1, 27, false),
+ FCC_PATTERN(103, 0, 1, 2000, 3066, 1, 18, false),
+ FCC_PATTERN(2, 0, 5, 150, 230, 1, 23, false),
+ FCC_PATTERN(3, 6, 10, 200, 500, 1, 16, false),
+ FCC_PATTERN(4, 11, 20, 200, 500, 1, 12, false),
+ FCC_PATTERN(5, 50, 100, 1000, 2000, 1, 1, true),
+ FCC_PATTERN(6, 0, 1, 333, 333, 1, 9, false),
};
static const struct radar_types fcc_radar_types = {
@@ -96,17 +106,23 @@ static const struct radar_types fcc_radar_types = {
.radar_types = fcc_radar_ref_types,
};
-#define JP_PATTERN FCC_PATTERN
+#define JP_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, RATE, CHIRP) \
+{ \
+ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
+ PMIN - PRI_TOLERANCE, \
+ PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
+ PPB_THRESH_RATE(PPB, RATE), PRI_TOLERANCE, CHIRP \
+}
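+
+/* e.g. the first JP pattern below uses PPB = 18 with RATE = 29, so
+ * its detection threshold is PPB_THRESH_RATE(18, 29)
+ * = (18 * 29 + 100 - 29) / 100 = 5 pulses.
+ */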
static const struct radar_detector_specs jp_radar_ref_types[] = {
- JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
- JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
- JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
- JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
- JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
- JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
- JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
- JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20),
- JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
+ JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
+ JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
+ JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
+ JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
+ JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
+ JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
+ JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
+ JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20, 50, false),
+ JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false),
};
static const struct radar_types jp_radar_types = {
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h
index dde2652b787c..25a43d632f90 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.h
@@ -40,12 +40,14 @@ struct ath_dfs_pool_stats {
* @freq: channel frequency in MHz
* @width: pulse duration in us
* @rssi: rssi of radar event
+ * @chirp: chirp detected in pulse
*/
struct pulse_event {
u64 ts;
u16 freq;
u8 width;
u8 rssi;
+ bool chirp;
};
/**
@@ -59,6 +61,7 @@ struct pulse_event {
* @ppb: pulses per bursts for this type
* @ppb_thresh: number of pulses required to trigger detection
* @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
+ * @chirp: chirp required for the radar pattern
*/
struct radar_detector_specs {
u8 type_id;
@@ -70,6 +73,7 @@ struct radar_detector_specs {
u8 ppb;
u8 ppb_thresh;
u8 max_pri_tolerance;
+ bool chirp;
};
/**
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index 43b608178884..1b5ad1965607 100644
--- a/drivers/net/wireless/ath/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -390,6 +390,10 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
if ((ts - de->last_ts) < rs->max_pri_tolerance)
/* if delta to last pulse is too short, don't use this pulse */
return NULL;
+ /* radar detector spec requires a chirp, but none was detected */
+ if (rs->chirp && rs->chirp != event->chirp)
+ return NULL;
+
de->last_ts = ts;
max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 0783d2ed8238..900e72a089d8 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -944,12 +944,12 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
WLAN_CIPHER_SUITE_CCMP,
};
- wcn->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_HAS_RATE_CONTROL |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_TIMING_BEACON_ONLY;
+ ieee80211_hw_set(wcn->hw, TIMING_BEACON_ONLY);
+ ieee80211_hw_set(wcn->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(wcn->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(wcn->hw, SUPPORTS_PS);
+ ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
+ ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index dbd894428be6..c9263e1c75d4 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -216,9 +216,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
sta_params->encrypt_type = priv_vif->encrypt_type;
- sta_params->short_preamble_supported =
- !(WCN36XX_FLAGS(wcn) &
- IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE);
+ sta_params->short_preamble_supported = true;
sta_params->rifs_mode = 0;
sta_params->rmf = 0;
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index caa717bf52f3..050506f842e9 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -12,6 +12,7 @@ wil6210-y += debug.o
wil6210-y += rx_reorder.o
wil6210-y += ioctl.o
wil6210-y += fw.o
+wil6210-y += pmc.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o
wil6210-y += ethtool.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index b97172667bc7..dbfcdd16628a 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -402,11 +402,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
rsn_eid = sme->ie ?
cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
NULL;
-
- if (sme->privacy && !rsn_eid) {
- wil_err(wil, "Missing RSN IE for secure connection\n");
- return -EINVAL;
- }
+ if (sme->privacy && !rsn_eid)
+ wil_info(wil, "WSC connection\n");
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
@@ -425,10 +422,17 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
wil->privacy = sme->privacy;
if (wil->privacy) {
- /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */
- rc = wmi_del_cipher_key(wil, 0, bss->bssid);
+ /* For secure assoc, remove old keys */
+ rc = wmi_del_cipher_key(wil, 0, bss->bssid,
+ WMI_KEY_USE_PAIRWISE);
if (rc) {
- wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
+ goto out;
+ }
+ rc = wmi_del_cipher_key(wil, 0, bss->bssid,
+ WMI_KEY_USE_RX_GROUP);
+ if (rc) {
+ wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
goto out;
}
}
@@ -458,11 +462,18 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
goto out;
}
if (wil->privacy) {
- conn.dot11_auth_mode = WMI_AUTH11_SHARED;
- conn.auth_mode = WMI_AUTH_WPA2_PSK;
- conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
- conn.pairwise_crypto_len = 16;
- } else {
+ if (rsn_eid) { /* regular secure connection */
+ conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+ conn.auth_mode = WMI_AUTH_WPA2_PSK;
+ conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+ conn.pairwise_crypto_len = 16;
+ conn.group_crypto_type = WMI_CRYPT_AES_GCMP;
+ conn.group_crypto_len = 16;
+ } else { /* WSC */
+ conn.dot11_auth_mode = WMI_AUTH11_WSC;
+ conn.auth_mode = WMI_AUTH_NONE;
+ }
+ } else { /* insecure connection */
conn.dot11_auth_mode = WMI_AUTH11_OPEN;
conn.auth_mode = WMI_AUTH_NONE;
}
@@ -507,6 +518,8 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
+
rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
return rc;
@@ -561,6 +574,39 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
return 0;
}
+static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
+ bool pairwise)
+{
+ struct wireless_dev *wdev = wil->wdev;
+ enum wmi_key_usage rc;
+ static const char * const key_usage_str[] = {
+ [WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE",
+ [WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP",
+ [WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP",
+ };
+
+ if (pairwise) {
+ rc = WMI_KEY_USE_PAIRWISE;
+ } else {
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ rc = WMI_KEY_USE_RX_GROUP;
+ break;
+ case NL80211_IFTYPE_AP:
+ rc = WMI_KEY_USE_TX_GROUP;
+ break;
+ default:
+ /* TODO: Rx GTK or Tx GTK? */
+ wil_err(wil, "Can't determine GTK type\n");
+ rc = WMI_KEY_USE_RX_GROUP;
+ break;
+ }
+ }
+ wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]);
+
+ return rc;
+}
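+
+/* Usage sketch: the add_key/del_key handlers below map cfg80211's
+ * "pairwise" flag to a WMI key slot; e.g. a station installing a
+ * group key (pairwise = false, iftype NL80211_IFTYPE_STATION)
+ * resolves to WMI_KEY_USE_RX_GROUP.
+ */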
+
static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct net_device *ndev,
u8 key_index, bool pairwise,
@@ -568,13 +614,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct key_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
- /* group key is not used */
- if (!pairwise)
- return 0;
+ wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
+ pairwise ? "PTK" : "GTK");
- return wmi_add_cipher_key(wil, key_index, mac_addr,
- params->key_len, params->key);
+ return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
+ params->key, key_usage);
}
static int wil_cfg80211_del_key(struct wiphy *wiphy,
@@ -583,12 +629,12 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
const u8 *mac_addr)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
- /* group key is not used */
- if (!pairwise)
- return 0;
+ wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
+ pairwise ? "PTK" : "GTK");
- return wmi_del_cipher_key(wil, key_index, mac_addr);
+ return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
}
/* Need to be present or wiphy_new() will WARN */
@@ -661,11 +707,6 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
if (bcon->probe_resp_len <= hlen)
return 0;
- if (!bcon->proberesp_ies) {
- bcon->proberesp_ies = f->u.probe_resp.variable;
- bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
- rc = 1;
- }
if (!bcon->assocresp_ies) {
bcon->assocresp_ies = f->u.probe_resp.variable;
bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
@@ -680,9 +721,19 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
+ size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ const u8 *pr_ies = NULL;
+ size_t pr_ies_len = 0;
int rc;
wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_print_bcon_data(bcon);
+
+ if (bcon->probe_resp_len > hlen) {
+ pr_ies = f->u.probe_resp.variable;
+ pr_ies_len = bcon->probe_resp_len - hlen;
+ }
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
@@ -695,9 +746,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
* bcon->beacon_ies);
*/
- rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
- bcon->proberesp_ies_len,
- bcon->proberesp_ies);
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
if (rc) {
wil_err(wil, "set_ie(PROBE_RESP) failed\n");
return rc;
@@ -725,6 +774,10 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+ struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
+ size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ const u8 *pr_ies = NULL;
+ size_t pr_ies_len = 0;
wil_dbg_misc(wil, "%s()\n", __func__);
@@ -744,6 +797,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
+ if (bcon->probe_resp_len > hlen) {
+ pr_ies = f->u.probe_resp.variable;
+ pr_ies_len = bcon->probe_resp_len - hlen;
+ }
+
if (wil_fix_bcon(wil, bcon)) {
wil_dbg_misc(wil, "Fixed bcon\n");
wil_print_bcon_data(bcon);
@@ -771,8 +829,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
* wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
* bcon->beacon_ies);
*/
- wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
- bcon->proberesp_ies);
+ wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
bcon->assocresp_ies);
@@ -814,13 +871,9 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
wmi_pcp_stop(wil);
__wil_down(wil);
- __wil_up(wil);
mutex_unlock(&wil->mutex);
- /* some functions above might fail (e.g. __wil_up). Nevertheless, we
- * return success because AP has stopped
- */
return 0;
}
@@ -830,6 +883,9 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac,
+ params->reason_code);
+
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, params->mac, params->reason_code, false);
mutex_unlock(&wil->mutex);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index bbc22d88f78f..8f9c0722a801 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -24,6 +24,7 @@
#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
+#include "pmc.h"
/* Nasty hack. Better have per device instances */
static u32 mem_addr;
@@ -123,15 +124,17 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
if (cid < WIL6210_MAX_CID)
seq_printf(s,
- "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
+ "\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
wil->sta[cid].addr, cid, tid,
+ txdata->dot1x_open ? "+" : "-",
txdata->agg_wsize,
txdata->agg_timeout,
txdata->agg_amsdu ? "+" : "-",
used, avail, sidle);
else
seq_printf(s,
- "\nBroadcast [%3d|%3d] idle %s\n",
+ "\nBroadcast 1x%s [%3d|%3d] idle %s\n",
+ txdata->dot1x_open ? "+" : "-",
used, avail, sidle);
wil_print_vring(s, wil, name, vring, '_', 'H');
@@ -702,6 +705,89 @@ static const struct file_operations fops_back = {
.open = simple_open,
};
+/* pmc control, write:
+ * - "alloc <num descriptors> <descriptor_size>" to allocate PMC
+ * - "free" to release memory allocated for PMC
+ */
+static ssize_t wil_write_pmccfg(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int rc;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+ char cmd[9];
+ int num_descs, desc_size;
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ rc = sscanf(kbuf, "%8s %d %d", cmd, &num_descs, &desc_size);
+ kfree(kbuf);
+
+ if (rc < 0)
+ return rc;
+
+ if (rc < 1) {
+ wil_err(wil, "pmccfg: no params given\n");
+ return -EINVAL;
+ }
+
+ if (0 == strcmp(cmd, "alloc")) {
+ if (rc != 3) {
+ wil_err(wil, "pmccfg: alloc requires 2 params\n");
+ return -EINVAL;
+ }
+ wil_pmc_alloc(wil, num_descs, desc_size);
+ } else if (0 == strcmp(cmd, "free")) {
+ if (rc != 1) {
+ wil_err(wil, "pmccfg: free does not have any params\n");
+ return -EINVAL;
+ }
+ wil_pmc_free(wil, true);
+ } else {
+ wil_err(wil, "pmccfg: Unrecognized command \"%s\"\n", cmd);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static ssize_t wil_read_pmccfg(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ char text[256];
+ char help[] = "pmc control, write:\n"
+ " - \"alloc <num descriptors> <descriptor_size>\" to allocate pmc\n"
+ " - \"free\" to free memory allocated for pmc\n";
+
+ sprintf(text, "Last command status: %d\n\n%s",
+ wil_pmc_last_cmd_status(wil),
+ help);
+
+ return simple_read_from_buffer(user_buf, count, ppos, text,
+ strlen(text) + 1);
+}
+
+static const struct file_operations fops_pmccfg = {
+ .read = wil_read_pmccfg,
+ .write = wil_write_pmccfg,
+ .open = simple_open,
+};
+
+static const struct file_operations fops_pmcdata = {
+ .open = simple_open,
+ .read = wil_pmc_read,
+ .llseek = wil_pmc_llseek,
+};
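+
+/* Usage sketch from user space (the debugfs mount point and exact
+ * path are illustrative and depend on the platform):
+ *
+ * echo "alloc 256 4096" > .../wil6210/pmccfg (allocate PMC ring)
+ * cat .../wil6210/pmccfg (last command status)
+ * echo "free" > .../wil6210/pmccfg (release the memory)
+ */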
+
/*---tx_mgmt---*/
/* Write mgmt frame to this file to send it */
static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
@@ -1111,8 +1197,7 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data)
status = "connected";
break;
}
- seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
- (p->data_port_open ? " data_port_open" : ""));
+ seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
if (p->status == wil_sta_connected) {
rc = wil_cid_fill_sinfo(wil, i, &sinfo);
@@ -1292,8 +1377,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
status = "connected";
break;
}
- seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
- (p->data_port_open ? " data_port_open" : ""));
+ seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
if (p->status == wil_sta_connected) {
spin_lock_bh(&p->tid_rx_lock);
@@ -1363,6 +1447,8 @@ static const struct {
{"tx_mgmt", S_IWUSR, &fops_txmgmt},
{"wmi_send", S_IWUSR, &fops_wmi},
{"back", S_IRUGO | S_IWUSR, &fops_back},
+ {"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg},
+ {"pmcdata", S_IRUGO, &fops_pmcdata},
{"temp", S_IRUGO, &fops_temp},
{"freq", S_IRUGO, &fops_freq},
{"link", S_IRUGO, &fops_link},
@@ -1440,6 +1526,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
if (IS_ERR_OR_NULL(dbg))
return -ENODEV;
+ wil_pmc_init(wil);
+
wil6210_debugfs_init_files(wil, dbg);
wil6210_debugfs_init_isr(wil, dbg);
wil6210_debugfs_init_blobs(wil, dbg);
@@ -1459,4 +1547,9 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
{
debugfs_remove_recursive(wil->debug);
wil->debug = NULL;
+
+ /* free pmc memory without sending command to fw, as it will
+ * be reset on the way down anyway
+ */
+ wil_pmc_free(wil, false);
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index c2a238426425..6d704aee3afd 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -25,6 +25,10 @@
#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
+bool debug_fw; /* = false; */
+module_param(debug_fw, bool, S_IRUGO);
+MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
+
bool no_fw_recovery;
module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
@@ -146,7 +150,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
sta->status);
- sta->data_port_open = false;
if (sta->status != wil_sta_unused) {
if (!from_event)
wmi_disconnect_sta(wil, sta->addr, reason_code);
@@ -224,7 +227,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
if (test_bit(wil_status_fwconnected, wil->status)) {
clear_bit(wil_status_fwconnected, wil->status);
cfg80211_disconnected(ndev, reason_code,
- NULL, 0, GFP_KERNEL);
+ NULL, 0, false, GFP_KERNEL);
} else if (test_bit(wil_status_fwconnecting, wil->status)) {
cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
@@ -373,9 +376,10 @@ int wil_bcast_init(struct wil6210_priv *wil)
if (ri < 0)
return ri;
+ wil->bcast_vring = ri;
rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
- if (rc == 0)
- wil->bcast_vring = ri;
+ if (rc)
+ wil->bcast_vring = -1;
return rc;
}
@@ -547,7 +551,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
static int wil_target_reset(struct wil6210_priv *wil)
{
int delay = 0;
- u32 x;
+ u32 x, x1 = 0;
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
@@ -602,12 +606,16 @@ static int wil_target_reset(struct wil6210_priv *wil)
do {
msleep(RST_DELAY);
x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
+ if (x1 != x) {
+ wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
+ x1 = x;
+ }
if (delay++ > RST_COUNT) {
wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
x);
return -ETIME;
}
- } while (!(x & BIT_BL_READY));
+ } while (x != BIT_BL_READY);
C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
@@ -686,6 +694,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
WARN_ON(!mutex_is_locked(&wil->mutex));
WARN_ON(test_bit(wil_status_napi_en, wil->status));
+ if (debug_fw) {
+ static const u8 mac[ETH_ALEN] = {
+ 0x00, 0xde, 0xad, 0x12, 0x34, 0x56,
+ };
+ struct net_device *ndev = wil_to_ndev(wil);
+
+ ether_addr_copy(ndev->perm_addr, mac);
+ ether_addr_copy(ndev->dev_addr, ndev->perm_addr);
+ return 0;
+ }
+
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index f2f7ea29558e..6042f61b016c 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -24,6 +24,11 @@ static int wil_open(struct net_device *ndev)
wil_dbg_misc(wil, "%s()\n", __func__);
+ if (debug_fw) {
+ wil_err(wil, "%s() while in debug_fw mode\n", __func__);
+ return -EINVAL;
+ }
+
return wil_up(wil);
}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 109986114abf..58c79166a6d1 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -27,10 +27,6 @@ MODULE_PARM_DESC(use_msi,
" Use MSI interrupt: "
"0 - don't, 1 - (default) - single, or 3");
-static bool debug_fw; /* = false; */
-module_param(debug_fw, bool, S_IRUGO);
-MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug");
-
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
@@ -133,8 +129,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
mutex_lock(&wil->mutex);
rc = wil_reset(wil, false);
mutex_unlock(&wil->mutex);
- if (debug_fw)
- rc = 0;
if (rc)
goto release_irq;
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
new file mode 100644
index 000000000000..8a8cdc61b25b
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include "wmi.h"
+#include "wil6210.h"
+#include "txrx.h"
+#include "pmc.h"
+
+struct desc_alloc_info {
+ dma_addr_t pa;
+ void *va;
+};
+
+static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
+{
+ return !!pmc->pring_va;
+}
+
+void wil_pmc_init(struct wil6210_priv *wil)
+{
+ memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
+ mutex_init(&wil->pmc.lock);
+}
+
+/**
+ * Allocate the physical ring (p-ring) and the requested
+ * number of descriptors of the requested size.
+ * Initialize the descriptors as required by PMC DMA.
+ * Each dword of a descriptor buffer is initialized to hold
+ * its serial number in the LSW and the reserved value
+ * PCM_DATA_INVALID_DW_VAL in the MSW.
+ */
+void wil_pmc_alloc(struct wil6210_priv *wil,
+ int num_descriptors,
+ int descriptor_size)
+{
+ u32 i;
+ struct pmc_ctx *pmc = &wil->pmc;
+ struct device *dev = wil_to_dev(wil);
+ struct wmi_pmc_cmd pmc_cmd = {0};
+
+ mutex_lock(&pmc->lock);
+
+ if (wil_is_pmc_allocated(pmc)) {
+ /* sanity check */
+ wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
+ goto no_release_err;
+ }
+
+ pmc->num_descriptors = num_descriptors;
+ pmc->descriptor_size = descriptor_size;
+
+ wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
+ __func__, num_descriptors, descriptor_size);
+
+ /* allocate descriptors info list in pmc context */
+ pmc->descriptors = kcalloc(num_descriptors,
+ sizeof(struct desc_alloc_info),
+ GFP_KERNEL);
+ if (!pmc->descriptors) {
+ wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__);
+ goto no_release_err;
+ }
+
+ wil_dbg_misc(wil,
+ "%s: allocated descriptors info list %p\n",
+ __func__, pmc->descriptors);
+
+ /* Allocate pring buffer and descriptors.
+ * vring->va should be aligned on its size rounded up to a power of 2.
+ * This is guaranteed by dma_alloc_coherent.
+ */
+ pmc->pring_va = dma_alloc_coherent(dev,
+ sizeof(struct vring_tx_desc) * num_descriptors,
+ &pmc->pring_pa,
+ GFP_KERNEL);
+
+ wil_dbg_misc(wil,
+ "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
+ __func__,
+ pmc->pring_va, &pmc->pring_pa,
+ sizeof(struct vring_tx_desc),
+ num_descriptors,
+ sizeof(struct vring_tx_desc) * num_descriptors);
+
+ if (!pmc->pring_va) {
+ wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
+ goto release_pmc_skb_list;
+ }
+
+ /* Initially, all descriptors are SW owned.
+ * For Tx, Rx, and PMC, the ownership bit is at the same location,
+ * so any of the descriptor layouts can be used here.
+ */
+ for (i = 0; i < num_descriptors; i++) {
+ struct vring_tx_desc *_d = &pmc->pring_va[i];
+ struct vring_tx_desc dd, *d = &dd;
+ int j = 0;
+
+ pmc->descriptors[i].va = dma_alloc_coherent(dev,
+ descriptor_size,
+ &pmc->descriptors[i].pa,
+ GFP_KERNEL);
+
+ if (unlikely(!pmc->descriptors[i].va)) {
+ wil_err(wil,
+ "%s: ERROR allocating pmc descriptor %d",
+ __func__, i);
+ goto release_pmc_skbs;
+ }
+
+ for (j = 0; j < descriptor_size / sizeof(u32); j++) {
+ u32 *p = (u32 *)pmc->descriptors[i].va + j;
+ *p = PCM_DATA_INVALID_DW_VAL | j;
+ }
+
+ /* configure dma descriptor */
+ d->dma.addr.addr_low =
+ cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
+ d->dma.addr.addr_high =
+ cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
+ d->dma.status = 0; /* 0 = HW_OWNED */
+ d->dma.length = cpu_to_le16(descriptor_size);
+ d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+ *_d = *d;
+ }
+
+ wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);
+
+ pmc_cmd.op = WMI_PMC_ALLOCATE;
+ pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
+ pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
+
+ wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
+ pmc->last_cmd_status = wmi_send(wil,
+ WMI_PMC_CMDID,
+ &pmc_cmd,
+ sizeof(pmc_cmd));
+ if (pmc->last_cmd_status) {
+ wil_err(wil,
+ "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
+ __func__, pmc->last_cmd_status);
+ goto release_pmc_skbs;
+ }
+
+ mutex_unlock(&pmc->lock);
+
+ return;
+
+release_pmc_skbs:
+ wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__);
+ for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
+ dma_free_coherent(dev,
+ descriptor_size,
+ pmc->descriptors[i].va,
+ pmc->descriptors[i].pa);
+
+ pmc->descriptors[i].va = NULL;
+ }
+ wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);
+
+ dma_free_coherent(dev,
+ sizeof(struct vring_tx_desc) * num_descriptors,
+ pmc->pring_va,
+ pmc->pring_pa);
+
+ pmc->pring_va = NULL;
+
+release_pmc_skb_list:
+ wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
+ __func__);
+ kfree(pmc->descriptors);
+ pmc->descriptors = NULL;
+
+no_release_err:
+ pmc->last_cmd_status = -ENOMEM;
+ mutex_unlock(&pmc->lock);
+}
+
+/**
+ * Traverse the p-ring and release all buffers.
+ * At the end release the p-ring memory
+ */
+void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
+{
+ struct pmc_ctx *pmc = &wil->pmc;
+ struct device *dev = wil_to_dev(wil);
+ struct wmi_pmc_cmd pmc_cmd = {0};
+
+ mutex_lock(&pmc->lock);
+
+ pmc->last_cmd_status = 0;
+
+ if (!wil_is_pmc_allocated(pmc)) {
+ wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
+ __func__);
+ pmc->last_cmd_status = -EPERM;
+ mutex_unlock(&pmc->lock);
+ return;
+ }
+
+ if (send_pmc_cmd) {
+ wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
+ __func__);
+ pmc_cmd.op = WMI_PMC_RELEASE;
+ pmc->last_cmd_status =
+ wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
+ sizeof(pmc_cmd));
+ if (pmc->last_cmd_status) {
+ wil_err(wil,
+ "%s WMI_PMC_CMD with RELEASE op failed, status %d",
+ __func__, pmc->last_cmd_status);
+ /* There's nothing we can do with this error.
+ * Normally, it should never occur.
+ * Continue freeing all memory allocated for pmc.
+ */
+ }
+ }
+
+ if (pmc->pring_va) {
+ size_t buf_size = sizeof(struct vring_tx_desc) *
+ pmc->num_descriptors;
+
+ wil_dbg_misc(wil, "%s: free pring va %p\n",
+ __func__, pmc->pring_va);
+ dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
+
+ pmc->pring_va = NULL;
+ } else {
+ pmc->last_cmd_status = -ENOENT;
+ }
+
+ if (pmc->descriptors) {
+ int i;
+
+ for (i = 0;
+ i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
+ dma_free_coherent(dev,
+ pmc->descriptor_size,
+ pmc->descriptors[i].va,
+ pmc->descriptors[i].pa);
+ pmc->descriptors[i].va = NULL;
+ }
+ wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
+ __func__, i, pmc->num_descriptors);
+ wil_dbg_misc(wil,
+ "%s: free pmc descriptors info list %p\n",
+ __func__, pmc->descriptors);
+ kfree(pmc->descriptors);
+ pmc->descriptors = NULL;
+ } else {
+ pmc->last_cmd_status = -ENOENT;
+ }
+
+ mutex_unlock(&pmc->lock);
+}
+
+/**
+ * Status of the last operation requested via debugfs: alloc/free/read.
+ * Returns 0 on success or a negative errno.
+ */
+int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "%s: status %d\n", __func__,
+ wil->pmc.last_cmd_status);
+
+ return wil->pmc.last_cmd_status;
+}
+
+/**
+ * Read from the requested position up to the end of the current
+ * descriptor; how much that is depends on the descriptor size
+ * configured during the alloc request.
+ */
+ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct wil6210_priv *wil = filp->private_data;
+ struct pmc_ctx *pmc = &wil->pmc;
+ ssize_t retval = 0;
+ unsigned long long idx;
+ loff_t offset;
+ size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
+ mutex_lock(&pmc->lock);
+
+ if (!wil_is_pmc_allocated(pmc)) {
+ wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
+ pmc->last_cmd_status = -EPERM;
+ mutex_unlock(&pmc->lock);
+ return -EPERM;
+ }
+
+ wil_dbg_misc(wil,
+ "%s: size %u, pos %lld\n",
+ __func__, (unsigned)count, *f_pos);
+
+ pmc->last_cmd_status = 0;
+
+ idx = *f_pos;
+ do_div(idx, pmc->descriptor_size);
+ offset = *f_pos - (idx * pmc->descriptor_size);
+
+ if (*f_pos >= pmc_size) {
+ wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
+ __func__, *f_pos, (unsigned)pmc_size);
+ pmc->last_cmd_status = -ERANGE;
+ goto out;
+ }
+
+ wil_dbg_misc(wil,
+ "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
+ __func__, *f_pos, idx, offset, count);
+
+ /* if no errors, return the copied byte count */
+ retval = simple_read_from_buffer(buf,
+ count,
+ &offset,
+ pmc->descriptors[idx].va,
+ pmc->descriptor_size);
+ *f_pos += retval;
+out:
+ mutex_unlock(&pmc->lock);
+
+ return retval;
+}
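As a worked example of the position split above: with descriptor_size = 2048 and *f_pos = 5000, do_div() leaves idx = 2 and offset = 904, so the call copies at most 2048 - 904 = 1144 bytes from descriptor 2 and advances *f_pos by the amount actually copied.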
+
+loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
+{
+ loff_t newpos;
+ struct wil6210_priv *wil = filp->private_data;
+ struct pmc_ctx *pmc = &wil->pmc;
+ size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
+ switch (whence) {
+ case 0: /* SEEK_SET */
+ newpos = off;
+ break;
+
+ case 1: /* SEEK_CUR */
+ newpos = filp->f_pos + off;
+ break;
+
+ case 2: /* SEEK_END */
+ newpos = pmc_size;
+ break;
+
+ default: /* can't happen */
+ return -EINVAL;
+ }
+
+ if (newpos < 0)
+ return -EINVAL;
+ if (newpos > pmc_size)
+ newpos = pmc_size;
+
+ filp->f_pos = newpos;
+
+ return newpos;
+}
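Taken together, the expected call sequence is alloc, then read/seek, then free. A minimal usage sketch, assuming the caller already holds a valid wil6210_priv; the sizes and the surrounding error handling are illustrative only:

	/* illustrative only: allocate 256 descriptors of 2 KiB each */
	wil_pmc_alloc(wil, 256, 2048);
	if (wil_pmc_last_cmd_status(wil))
		return wil_pmc_last_cmd_status(wil);

	/* ... hardware fills the descriptors; data is read back
	 * through the debugfs hooks wil_pmc_read()/wil_pmc_llseek() ...
	 */

	/* release ring and descriptors; true also sends WMI_PMC_RELEASE */
	wil_pmc_free(wil, true);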
diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h
new file mode 100644
index 000000000000..bebc8d52e1e6
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pmc.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+
+#define PCM_DATA_INVALID_DW_VAL (0xB0BA0000)
+
+void wil_pmc_init(struct wil6210_priv *wil);
+void wil_pmc_alloc(struct wil6210_priv *wil,
+ int num_descriptors, int descriptor_size);
+void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd);
+int wil_pmc_last_cmd_status(struct wil6210_priv *wil);
+ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
+		     loff_t *f_pos);
+loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence);
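The read/llseek prototypes are shaped for direct use as debugfs file operations. A hypothetical wiring sketch; the actual hookup lives in debugfs.c, outside this patch:

	static const struct file_operations fops_pmcdata = {
		.open	= simple_open,	/* sets filp->private_data = wil */
		.read	= wil_pmc_read,
		.llseek	= wil_pmc_llseek,
	};

	/* debugfs_create_file("pmcdata", S_IRUGO, dbg, wil, &fops_pmcdata); */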
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index e8bd512d81a9..0113dac3a9a9 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -236,7 +236,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
return -ENOMEM;
}
- d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+ d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
wil_desc_addr_set(&d->dma.addr, pa);
/* ip_length don't care */
/* b11 don't care */
@@ -724,6 +724,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+ if (!wil->privacy)
+ txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
if (rc)
@@ -738,11 +740,13 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
- if (wil->sta[cid].data_port_open && (agg_wsize >= 0))
+ if (txdata->dot1x_open && (agg_wsize >= 0))
wil_addba_tx_request(wil, id, agg_wsize);
return 0;
out_free:
+ txdata->dot1x_open = false;
+ txdata->enabled = 0;
wil_vring_free(wil, vring, 1);
out:
@@ -792,6 +796,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+ if (!wil->privacy)
+ txdata->dot1x_open = true;
rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
if (rc)
@@ -809,6 +815,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
return 0;
out_free:
+ txdata->enabled = 0;
+ txdata->dot1x_open = false;
wil_vring_free(wil, vring, 1);
out:
@@ -828,6 +836,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = false;
txdata->enabled = 0; /* no Tx can be in progress or start anew */
spin_unlock_bh(&txdata->lock);
/* make sure NAPI won't touch this vring */
@@ -848,12 +857,11 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
if (cid < 0)
return NULL;
- if (!wil->sta[cid].data_port_open &&
- (skb->protocol != cpu_to_be16(ETH_P_PAE)))
- return NULL;
-
/* TODO: fix for multiple TID */
for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
+ if (!wil->vring_tx_data[i].dot1x_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ continue;
if (wil->vring2cid_tid[i][0] == cid) {
struct vring *v = &wil->vring_tx[i];
@@ -883,7 +891,7 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
/* In the STA mode, it is expected to have only 1 VRING
* for the AP we connected to.
- * find 1-st vring and see whether it is eligible for data
+ * find 1-st vring eligible for this skb and use it.
*/
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
v = &wil->vring_tx[i];
@@ -894,9 +902,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->sta[cid].data_port_open &&
+ if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
- break;
+ continue;
wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
@@ -918,7 +926,6 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
* in all cases override dest address to unicast peer's address
* Use old strategy when new is not supported yet:
* - for PBSS
- * - for secure link
*/
static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
struct sk_buff *skb)
@@ -931,6 +938,9 @@ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
v = &wil->vring_tx[i];
if (!v->va)
return NULL;
+ if (!wil->vring_tx_data[i].dot1x_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ return NULL;
return v;
}
@@ -963,7 +973,8 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
cid = wil->vring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->sta[cid].data_port_open)
+ if (!wil->vring_tx_data[i].dot1x_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
continue;
/* don't Tx back to source when re-routing Rx->Tx at the AP */
@@ -989,7 +1000,8 @@ found:
cid = wil->vring2cid_tid[i][0];
if (cid >= WIL6210_MAX_CID) /* skip BCAST */
continue;
- if (!wil->sta[cid].data_port_open)
+ if (!wil->vring_tx_data[i].dot1x_open &&
+ (skb->protocol != cpu_to_be16(ETH_P_PAE)))
continue;
if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
@@ -1016,9 +1028,6 @@ static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
if (wdev->iftype != NL80211_IFTYPE_AP)
return wil_find_tx_bcast_2(wil, skb);
- if (wil->privacy)
- return wil_find_tx_bcast_2(wil, skb);
-
return wil_find_tx_bcast_1(wil, skb);
}
@@ -1144,13 +1153,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_tx_desc_map(d, pa, len, vring_index);
if (unlikely(mcast)) {
d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
- if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
- /* set MCS 1 */
+ if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
- /* packet mode 2 */
- d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
- (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
- }
}
/* Process TCP/UDP checksum offloading */
if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
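The same eligibility test now appears in every ring-selection path above: a ring may carry the frame only if 802.1x is open on that ring, or the frame is itself an EAPOL frame. A hypothetical helper capturing it (not part of the patch):

	static bool wil_ring_may_tx_skb(struct wil6210_priv *wil, int i,
					struct sk_buff *skb)
	{
		return wil->vring_tx_data[i].dot1x_open ||
		       skb->protocol == cpu_to_be16(ETH_P_PAE);
	}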
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index d90c8aa20c15..0c4638487c74 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -384,19 +384,27 @@ struct vring_rx_mac {
* [word 7] length
*/
-#define RX_DMA_D0_CMD_DMA_IT BIT(10)
-
-/* Error field, offload bits */
-#define RX_DMA_ERROR_L3_ERR BIT(4)
-#define RX_DMA_ERROR_L4_ERR BIT(5)
+#define RX_DMA_D0_CMD_DMA_EOP BIT(8)
+#define RX_DMA_D0_CMD_DMA_RT BIT(9) /* always 1 */
+#define RX_DMA_D0_CMD_DMA_IT BIT(10) /* interrupt */
+
+/* Error field */
+#define RX_DMA_ERROR_FCS BIT(0)
+#define RX_DMA_ERROR_MIC BIT(1)
+#define RX_DMA_ERROR_KEY BIT(2) /* Key missing */
+#define RX_DMA_ERROR_REPLAY BIT(3)
+#define RX_DMA_ERROR_L3_ERR BIT(4)
+#define RX_DMA_ERROR_L4_ERR BIT(5)
/* Status field */
-#define RX_DMA_STATUS_DU BIT(0)
-#define RX_DMA_STATUS_ERROR BIT(2)
-
+#define RX_DMA_STATUS_DU BIT(0)
+#define RX_DMA_STATUS_EOP BIT(1)
+#define RX_DMA_STATUS_ERROR BIT(2)
+#define RX_DMA_STATUS_MI BIT(3) /* MAC Interrupt is asserted */
#define RX_DMA_STATUS_L3I BIT(4)
#define RX_DMA_STATUS_L4I BIT(5)
#define RX_DMA_STATUS_PHY_INFO BIT(6)
+#define RX_DMA_STATUS_FFM BIT(7) /* EtherType Flex Filter Match */
struct vring_rx_dma {
u32 d0;
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 4310972c9e16..f3513a1fa424 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -21,6 +21,7 @@
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include <linux/timex.h>
+#include <linux/types.h>
#include "wil_platform.h"
extern bool no_fw_recovery;
@@ -29,10 +30,11 @@ extern unsigned short rx_ring_overflow_thrsh;
extern int agg_wsize;
extern u32 vring_idle_trsh;
extern bool rx_align_2;
+extern bool debug_fw;
#define WIL_NAME "wil6210"
#define WIL_FW_NAME "wil6210.fw" /* code */
-#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */
+#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */
#define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
@@ -396,6 +398,7 @@ struct vring {
* Additional data for Tx Vring
*/
struct vring_tx_data {
+ bool dot1x_open;
int enabled;
cycles_t idle, last_idle, begin;
u8 agg_wsize; /* agreed aggregation window, 0 - no agg */
@@ -484,7 +487,6 @@ struct wil_sta_info {
u8 addr[ETH_ALEN];
enum wil_sta_status status;
struct wil_net_stats stats;
- bool data_port_open; /* can send any data, not only EAPOL */
/* Rx BACK */
struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
spinlock_t tid_rx_lock; /* guarding tid_rx array */
@@ -526,6 +528,17 @@ struct wil_probe_client_req {
u8 cid;
};
+struct pmc_ctx {
+ /* alloc, free, and read operations must own the lock */
+ struct mutex lock;
+ struct vring_tx_desc *pring_va;
+ dma_addr_t pring_pa;
+ struct desc_alloc_info *descriptors;
+ int last_cmd_status;
+ int num_descriptors;
+ int descriptor_size;
+};
+
struct wil6210_priv {
struct pci_dev *pdev;
int n_msi;
@@ -610,6 +623,8 @@ struct wil6210_priv {
void *platform_handle;
struct wil_platform_ops platform_ops;
+
+ struct pmc_ctx pmc;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -701,9 +716,10 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
int wmi_set_channel(struct wil6210_priv *wil, int channel);
int wmi_get_channel(struct wil6210_priv *wil, int *channel);
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
- const void *mac_addr);
+ const void *mac_addr, int key_usage);
int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
- const void *mac_addr, int key_len, const void *key);
+ const void *mac_addr, int key_len, const void *key,
+ int key_usage);
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 9fe2085be2c5..3dc8daf69bd2 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -543,55 +543,22 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
}
}
-static void wil_addba_tx_cid(struct wil6210_priv *wil, u8 cid, u16 wsize)
+static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
{
- struct vring_tx_data *t;
- int i;
+ struct wmi_vring_en_event *evt = d;
+ u8 vri = evt->vring_index;
- for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
- if (cid != wil->vring2cid_tid[i][0])
- continue;
- t = &wil->vring_tx_data[i];
- if (!t->enabled)
- continue;
+ wil_dbg_wmi(wil, "Enable vring %d\n", vri);
- wil_addba_tx_request(wil, i, wsize);
- }
-}
-
-static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
-{
- struct wmi_data_port_open_event *evt = d;
- u8 cid = evt->cid;
-
- wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
-
- if (cid >= ARRAY_SIZE(wil->sta)) {
- wil_err(wil, "Link UP for invalid CID %d\n", cid);
+ if (vri >= ARRAY_SIZE(wil->vring_tx)) {
+ wil_err(wil, "Enable for invalid vring %d\n", vri);
return;
}
-
- wil->sta[cid].data_port_open = true;
- if (agg_wsize >= 0)
- wil_addba_tx_cid(wil, cid, agg_wsize);
-}
-
-static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
-{
- struct net_device *ndev = wil_to_ndev(wil);
- struct wmi_wbe_link_down_event *evt = d;
- u8 cid = evt->cid;
-
- wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
- cid, le32_to_cpu(evt->reason));
-
- if (cid >= ARRAY_SIZE(wil->sta)) {
- wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
+ wil->vring_tx_data[vri].dot1x_open = true;
+ if (vri == wil->bcast_vring) /* no BA for bcast */
return;
- }
-
- wil->sta[cid].data_port_open = false;
- netif_carrier_off(ndev);
+ if (agg_wsize >= 0)
+ wil_addba_tx_request(wil, vri, agg_wsize);
}
static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
@@ -695,11 +662,10 @@ static const struct {
{WMI_CONNECT_EVENTID, wmi_evt_connect},
{WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
{WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
- {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup},
- {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown},
{WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
{WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
{WMI_DELBA_EVENTID, wmi_evt_delba},
+ {WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
};
/*
@@ -844,7 +810,7 @@ int wmi_echo(struct wil6210_priv *wil)
};
return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
- WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
+ WMI_ECHO_RSP_EVENTID, NULL, 0, 50);
}
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
@@ -985,7 +951,7 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
}
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
- const void *mac_addr)
+ const void *mac_addr, int key_usage)
{
struct wmi_delete_cipher_key_cmd cmd = {
.key_index = key_index,
@@ -998,11 +964,12 @@ int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
}
int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
- const void *mac_addr, int key_len, const void *key)
+ const void *mac_addr, int key_len, const void *key,
+ int key_usage)
{
struct wmi_add_cipher_key_cmd cmd = {
.key_index = key_index,
- .key_usage = WMI_KEY_USE_PAIRWISE,
+ .key_usage = key_usage,
.key_len = key_len,
};
@@ -1238,7 +1205,8 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-");
rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd),
- WMI_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100);
+ WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply),
+ 100);
if (rc)
return rc;
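Callers of wmi_add_cipher_key()/wmi_del_cipher_key() now choose the key usage explicitly, matching the renamed wmi_key_usage values in wmi.h below. A plausible selection in the cfg80211 add_key path; pairwise and params stand in for the cfg80211 callback arguments, and the real mapping lives in cfg80211.c outside this hunk:

	int key_usage = pairwise ? WMI_KEY_USE_PAIRWISE
				 : WMI_KEY_USE_RX_GROUP;

	rc = wmi_add_cipher_key(wil, key_index, mac_addr,
				params->key_len, params->key, key_usage);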
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index b29055315350..cc04ab73b398 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
* Copyright (c) 2006-2012 Wilocity.
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -253,8 +253,8 @@ struct wmi_set_passphrase_cmd {
*/
enum wmi_key_usage {
WMI_KEY_USE_PAIRWISE = 0,
- WMI_KEY_USE_GROUP = 1,
- WMI_KEY_USE_TX = 2, /* default Tx Key - Static WEP only */
+ WMI_KEY_USE_RX_GROUP = 1,
+ WMI_KEY_USE_TX_GROUP = 2,
};
struct wmi_add_cipher_key_cmd {
@@ -836,6 +836,21 @@ struct wmi_temp_sense_cmd {
} __packed;
/*
+ * WMI_PMC_CMDID
+ */
+enum wmi_pmc_op_e {
+ WMI_PMC_ALLOCATE = 0,
+ WMI_PMC_RELEASE = 1,
+};
+
+struct wmi_pmc_cmd {
+ u8 op; /* enum wmi_pmc_op_e */
+ u8 reserved;
+ __le16 ring_size;
+ __le64 mem_base;
+} __packed;
+
+/*
* WMI Events
*/
@@ -870,7 +885,7 @@ enum wmi_event_id {
WMI_VRING_CFG_DONE_EVENTID = 0x1821,
WMI_BA_STATUS_EVENTID = 0x1823,
WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
- WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
+ WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
WMI_DELBA_EVENTID = 0x1826,
WMI_GET_SSID_EVENTID = 0x1828,
WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
@@ -882,7 +897,7 @@ enum wmi_event_id {
WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
- WMI_BEAFORMING_MGMT_DONE_EVENTID = 0x1836,
+ WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
@@ -894,11 +909,12 @@ enum wmi_event_id {
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
- WMI_WBE_LINKDOWN_EVENTID = 0x1861,
+ WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
+ WMI_VRING_EN_EVENTID = 0x1865,
WMI_UNIT_TEST_EVENTID = 0x1900,
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
@@ -1147,7 +1163,7 @@ struct wmi_vring_cfg_done_event {
} __packed;
/*
- * WMI_ADDBA_RESP_SENT_EVENTID
+ * WMI_RCP_ADDBA_RESP_SENT_EVENTID
*/
struct wmi_rcp_addba_resp_sent_event {
u8 cidxtid;
@@ -1179,7 +1195,7 @@ struct wmi_cfg_rx_chain_done_event {
} __packed;
/*
- * WMI_WBE_LINKDOWN_EVENTID
+ * WMI_WBE_LINK_DOWN_EVENTID
*/
enum wmi_wbe_link_down_event_reason {
WMI_WBE_REASON_USER_REQUEST = 0,
@@ -1202,6 +1218,14 @@ struct wmi_data_port_open_event {
} __packed;
/*
+ * WMI_VRING_EN_EVENTID
+ */
+struct wmi_vring_en_event {
+ u8 vring_index;
+ u8 reserved[3];
+} __packed;
+
+/*
* WMI_GET_PCP_CHANNEL_EVENTID
*/
struct wmi_get_pcp_channel_event {
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b2f9521fe551..575b9f4b5589 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3131,8 +3131,6 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
ctl |= B43_MACCTL_KEEP_BAD;
if (wl->filter_flags & FIF_PLCPFAIL)
ctl |= B43_MACCTL_KEEP_BADPLCP;
- if (wl->filter_flags & FIF_PROMISC_IN_BSS)
- ctl |= B43_MACCTL_PROMISC;
if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC)
ctl |= B43_MACCTL_BEACPROMISC;
@@ -4310,16 +4308,14 @@ static void b43_op_configure_filter(struct ieee80211_hw *hw,
goto out_unlock;
}
- *fflags &= FIF_PROMISC_IN_BSS |
- FIF_ALLMULTI |
+ *fflags &= FIF_ALLMULTI |
FIF_FCSFAIL |
FIF_PLCPFAIL |
FIF_CONTROL |
FIF_OTHER_BSS |
FIF_BCN_PRBRESP_PROMISC;
- changed &= FIF_PROMISC_IN_BSS |
- FIF_ALLMULTI |
+ changed &= FIF_ALLMULTI |
FIF_FCSFAIL |
FIF_PLCPFAIL |
FIF_CONTROL |
@@ -5609,8 +5605,8 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
wl = hw_to_b43_wl(hw);
/* fill hw info */
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM;
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index c77b7f59505c..afc1fb3e38df 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2055,8 +2055,6 @@ static void b43legacy_adjust_opmode(struct b43legacy_wldev *dev)
ctl |= B43legacy_MACCTL_KEEP_BAD;
if (wl->filter_flags & FIF_PLCPFAIL)
ctl |= B43legacy_MACCTL_KEEP_BADPLCP;
- if (wl->filter_flags & FIF_PROMISC_IN_BSS)
- ctl |= B43legacy_MACCTL_PROMISC;
if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC)
ctl |= B43legacy_MACCTL_BEACPROMISC;
@@ -2922,16 +2920,14 @@ static void b43legacy_op_configure_filter(struct ieee80211_hw *hw,
}
spin_lock_irqsave(&wl->irq_lock, flags);
- *fflags &= FIF_PROMISC_IN_BSS |
- FIF_ALLMULTI |
+ *fflags &= FIF_ALLMULTI |
FIF_FCSFAIL |
FIF_PLCPFAIL |
FIF_CONTROL |
FIF_OTHER_BSS |
FIF_BCN_PRBRESP_PROMISC;
- changed &= FIF_PROMISC_IN_BSS |
- FIF_ALLMULTI |
+ changed &= FIF_ALLMULTI |
FIF_FCSFAIL |
FIF_PLCPFAIL |
FIF_CONTROL |
@@ -3836,8 +3832,9 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
}
/* fill hw info */
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM;
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 9b508bd3b839..71779b9e4bbe 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -33,6 +33,7 @@
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
+#include <linux/acpi.h>
#include <net/cfg80211.h>
#include <defs.h>
@@ -1011,6 +1012,14 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
return 0;
}
+static void brcmf_sdiod_host_fixup(struct mmc_host *host)
+{
+ /* runtime-pm powers off the device */
+ pm_runtime_forbid(host->parent);
+ /* avoid removal detection upon resume */
+ host->caps |= MMC_CAP_NONREMOVABLE;
+}
+
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
struct sdio_func *func;
@@ -1076,7 +1085,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
ret = -ENODEV;
goto out;
}
- pm_runtime_forbid(host->parent);
+ brcmf_sdiod_host_fixup(host);
out:
if (ret)
brcmf_sdiod_remove(sdiodev);
@@ -1108,12 +1117,25 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
+static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
+ int val)
+{
+#if IS_ENABLED(CONFIG_ACPI)
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(dev);
+ if (adev)
+ adev->flags.power_manageable = val;
+#endif
+}
+
static int brcmf_ops_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int err;
struct brcmf_sdio_dev *sdiodev;
struct brcmf_bus *bus_if;
+ struct device *dev;
brcmf_dbg(SDIO, "Enter\n");
brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -1121,6 +1143,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
brcmf_dbg(SDIO, "Function#: %d\n", func->num);
+ dev = &func->dev;
+ /* prohibit ACPI power management for this device */
+ brcmf_sdiod_acpi_set_power_manageable(dev, 0);
+
/* Consume func num 1 but don't do anything with it. */
if (func->num == 1)
return 0;
@@ -1246,15 +1272,15 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
brcmf_sdiod_freezer_on(sdiodev);
brcmf_sdio_wd_timer(sdiodev->bus, 0);
+ sdio_flags = MMC_PM_KEEP_POWER;
if (sdiodev->wowl_enabled) {
- sdio_flags = MMC_PM_KEEP_POWER;
if (sdiodev->pdata->oob_irq_supported)
enable_irq_wake(sdiodev->pdata->oob_irq_nr);
else
- sdio_flags = MMC_PM_WAKE_SDIO_IRQ;
- if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
- brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
}
+ if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
+ brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
return 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 8a15ebbce4a3..e10fa67010c0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -129,13 +129,47 @@ static struct ieee80211_rate __wl_rates[] = {
RATETAB_ENT(BRCM_RATE_54M, 0),
};
-#define wl_a_rates (__wl_rates + 4)
-#define wl_a_rates_size 8
#define wl_g_rates (__wl_rates + 0)
-#define wl_g_rates_size 12
+#define wl_g_rates_size ARRAY_SIZE(__wl_rates)
+#define wl_a_rates (__wl_rates + 4)
+#define wl_a_rates_size (wl_g_rates_size - 4)
+
+#define CHAN2G(_channel, _freq) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_channel), \
+ .flags = IEEE80211_CHAN_DISABLED, \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = 5000 + (5 * (_channel)), \
+ .hw_value = (_channel), \
+ .flags = IEEE80211_CHAN_DISABLED, \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+ CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427),
+ CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447),
+ CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467),
+ CHAN2G(13, 2472), CHAN2G(14, 2484)
+};
+
+static struct ieee80211_channel __wl_5ghz_channels[] = {
+ CHAN5G(34), CHAN5G(36), CHAN5G(38), CHAN5G(40), CHAN5G(42),
+ CHAN5G(44), CHAN5G(46), CHAN5G(48), CHAN5G(52), CHAN5G(56),
+ CHAN5G(60), CHAN5G(64), CHAN5G(100), CHAN5G(104), CHAN5G(108),
+ CHAN5G(112), CHAN5G(116), CHAN5G(120), CHAN5G(124), CHAN5G(128),
+ CHAN5G(132), CHAN5G(136), CHAN5G(140), CHAN5G(144), CHAN5G(149),
+ CHAN5G(153), CHAN5G(157), CHAN5G(161), CHAN5G(165)
+};
/* Band templates duplicated per wiphy. The channel info
- * is filled in after querying the device.
+ * above is added to the band during setup.
*/
static const struct ieee80211_supported_band __wl_band_2ghz = {
.band = IEEE80211_BAND_2GHZ,
@@ -143,7 +177,7 @@ static const struct ieee80211_supported_band __wl_band_2ghz = {
.n_bitrates = wl_g_rates_size,
};
-static const struct ieee80211_supported_band __wl_band_5ghz_a = {
+static const struct ieee80211_supported_band __wl_band_5ghz = {
.band = IEEE80211_BAND_5GHZ,
.bitrates = wl_a_rates,
.n_bitrates = wl_a_rates_size,
@@ -1262,7 +1296,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
}
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
- GFP_KERNEL);
+ true, GFP_KERNEL);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
@@ -1928,7 +1962,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
- cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL);
+ cfg80211_disconnected(ndev, reason_code, NULL, 0, true, GFP_KERNEL);
memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
scbval.val = cpu_to_le32(reason_code);
@@ -5253,40 +5287,6 @@ dongle_scantime_out:
return err;
}
-/* Filter the list of channels received from firmware counting only
- * the 20MHz channels. The wiphy band data only needs those which get
- * flagged to indicate if they can take part in higher bandwidth.
- */
-static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
- struct brcmf_chanspec_list *chlist,
- u32 chcnt[])
-{
- u32 total = le32_to_cpu(chlist->count);
- struct brcmu_chan ch;
- int i;
-
- for (i = 0; i < total; i++) {
- ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
- cfg->d11inf.decchspec(&ch);
-
- /* Firmware gives a ordered list. We skip non-20MHz
- * channels is 2G. For 5G we can abort upon reaching
- * a non-20MHz channel in the list.
- */
- if (ch.bw != BRCMU_CHAN_BW_20) {
- if (ch.band == BRCMU_CHAN_BAND_5G)
- break;
- else
- continue;
- }
-
- if (ch.band == BRCMU_CHAN_BAND_2G)
- chcnt[0] += 1;
- else if (ch.band == BRCMU_CHAN_BAND_5G)
- chcnt[1] += 1;
- }
-}
-
static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel,
struct brcmu_chan *ch)
{
@@ -5322,7 +5322,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
u32 i, j;
u32 total;
u32 chaninfo;
- u32 chcnt[2] = { 0, 0 };
u32 index;
pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
@@ -5339,42 +5338,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
goto fail_pbuf;
}
- brcmf_count_20mhz_channels(cfg, list, chcnt);
wiphy = cfg_to_wiphy(cfg);
- if (chcnt[0]) {
- band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
- GFP_KERNEL);
- if (band == NULL) {
- err = -ENOMEM;
- goto fail_pbuf;
- }
- band->channels = kcalloc(chcnt[0], sizeof(*channel),
- GFP_KERNEL);
- if (band->channels == NULL) {
- kfree(band);
- err = -ENOMEM;
- goto fail_pbuf;
- }
- band->n_channels = 0;
- wiphy->bands[IEEE80211_BAND_2GHZ] = band;
- }
- if (chcnt[1]) {
- band = kmemdup(&__wl_band_5ghz_a, sizeof(__wl_band_5ghz_a),
- GFP_KERNEL);
- if (band == NULL) {
- err = -ENOMEM;
- goto fail_band2g;
- }
- band->channels = kcalloc(chcnt[1], sizeof(*channel),
- GFP_KERNEL);
- if (band->channels == NULL) {
- kfree(band);
- err = -ENOMEM;
- goto fail_band2g;
- }
- band->n_channels = 0;
- wiphy->bands[IEEE80211_BAND_5GHZ] = band;
- }
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (band)
+ for (i = 0; i < band->n_channels; i++)
+ band->channels[i].flags = IEEE80211_CHAN_DISABLED;
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (band)
+ for (i = 0; i < band->n_channels; i++)
+ band->channels[i].flags = IEEE80211_CHAN_DISABLED;
total = le32_to_cpu(list->count);
for (i = 0; i < total; i++) {
@@ -5389,6 +5361,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
continue;
}
+ if (!band)
+ continue;
if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) &&
ch.bw == BRCMU_CHAN_BW_40)
continue;
@@ -5416,9 +5390,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
} else if (ch.bw == BRCMU_CHAN_BW_40) {
brcmf_update_bw40_channel_flag(&channel[index], &ch);
} else {
- /* disable other bandwidths for now as mentioned
- * order assure they are enabled for subsequent
- * chanspecs.
+ /* enable the channel and disable other bandwidths
+ * for now; the ordering mentioned above assures they
+ * are enabled for subsequent chanspecs.
*/
channel[index].flags = IEEE80211_CHAN_NO_HT40 |
IEEE80211_CHAN_NO_80MHZ;
@@ -5437,16 +5411,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
IEEE80211_CHAN_NO_IR;
}
}
- if (index == band->n_channels)
- band->n_channels++;
}
- kfree(pbuf);
- return 0;
-fail_band2g:
- kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
- kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
- wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
fail_pbuf:
kfree(pbuf);
return err;
@@ -5779,7 +5745,12 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
{
+ struct ieee80211_supported_band *band;
struct ieee80211_iface_combination ifc_combo;
+ __le32 bandlist[3];
+ u32 n_bands;
+ int err, i;
+
wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
@@ -5812,7 +5783,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
- brcmf_wiphy_pno_params(wiphy);
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
+ brcmf_wiphy_pno_params(wiphy);
/* vendor commands/events support */
wiphy->vendor_commands = brcmf_vendor_cmds;
@@ -5821,7 +5793,52 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL))
brcmf_wiphy_wowl_params(wiphy);
- return brcmf_setup_wiphybands(wiphy);
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist,
+ sizeof(bandlist));
+ if (err) {
+ brcmf_err("could not obtain band info: err=%d\n", err);
+ return err;
+ }
+ /* first entry in bandlist is number of bands */
+ n_bands = le32_to_cpu(bandlist[0]);
+ for (i = 1; i <= n_bands && i < ARRAY_SIZE(bandlist); i++) {
+ if (bandlist[i] == cpu_to_le32(WLC_BAND_2G)) {
+ band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
+ GFP_KERNEL);
+ if (!band)
+ return -ENOMEM;
+
+ band->channels = kmemdup(&__wl_2ghz_channels,
+ sizeof(__wl_2ghz_channels),
+ GFP_KERNEL);
+ if (!band->channels) {
+ kfree(band);
+ return -ENOMEM;
+ }
+
+ band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
+ wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ }
+ if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
+ band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
+ GFP_KERNEL);
+ if (!band)
+ return -ENOMEM;
+
+ band->channels = kmemdup(&__wl_5ghz_channels,
+ sizeof(__wl_5ghz_channels),
+ GFP_KERNEL);
+ if (!band->channels) {
+ kfree(band);
+ return -ENOMEM;
+ }
+
+ band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
+ wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ }
+ }
+ err = brcmf_setup_wiphybands(wiphy);
+ return err;
}
static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
@@ -6007,11 +6024,18 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
memset(&ccreq, 0, sizeof(ccreq));
ccreq.rev = cpu_to_le32(-1);
memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
- brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
+ if (brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq))) {
+ brcmf_err("firmware rejected country setting\n");
+ return;
+ }
+ brcmf_setup_wiphybands(wiphy);
}
static void brcmf_free_wiphy(struct wiphy *wiphy)
{
+ if (!wiphy)
+ return;
+
kfree(wiphy->iface_combinations);
if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index ab2fac8b2760..288f8314f208 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -649,6 +649,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_43567_CHIP_ID:
case BRCM_CC_43569_CHIP_ID:
case BRCM_CC_43570_CHIP_ID:
+ case BRCM_CC_4358_CHIP_ID:
case BRCM_CC_43602_CHIP_ID:
return 0x180000;
default:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
index 77656c711bed..26c65872dae3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
@@ -22,17 +22,6 @@
#include "core.h"
#include "commonring.h"
-
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- * SEE ALSO msgbuf.c
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
void brcmf_commonring_register_cb(struct brcmf_commonring *commonring,
int (*cr_ring_bell)(void *ctx),
int (*cr_update_rptr)(void *ctx),
@@ -206,14 +195,9 @@ int brcmf_commonring_write_complete(struct brcmf_commonring *commonring)
address = commonring->buf_addr;
address += (commonring->f_ptr * commonring->item_len);
if (commonring->f_ptr > commonring->w_ptr) {
- brcmf_dma_flush(address,
- (commonring->depth - commonring->f_ptr) *
- commonring->item_len);
address = commonring->buf_addr;
commonring->f_ptr = 0;
}
- brcmf_dma_flush(address, (commonring->w_ptr - commonring->f_ptr) *
- commonring->item_len);
commonring->f_ptr = commonring->w_ptr;
@@ -258,8 +242,6 @@ void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring,
if (commonring->r_ptr == commonring->depth)
commonring->r_ptr = 0;
- brcmf_dma_invalidate_cache(ret_addr, *n_items * commonring->item_len);
-
return ret_addr;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
index 7748a1ccf14f..2c5fad3a3aa2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -124,6 +124,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
struct brcmf_if *ifp = drvr->iflist[0];
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
index f5832e077bb7..546962525cd2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.h
@@ -19,11 +19,15 @@
/*
* Features:
*
+ * MBSS: multiple BSSID support (e.g. guest network in AP mode).
* MCHAN: multi-channel for concurrent P2P.
+ * PNO: preferred network offload.
+ * WOWL: Wake-On-WLAN.
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
BRCMF_FEAT_DEF(MCHAN) \
+ BRCMF_FEAT_DEF(PNO) \
BRCMF_FEAT_DEF(WOWL)
/*
* Quirks:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 9cb99152ad17..7ae6461df932 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -23,6 +23,10 @@
#include "debug.h"
#include "firmware.h"
+#define BRCMF_FW_MAX_NVRAM_SIZE 64000
+#define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */
+#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */
+
char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
module_param_string(firmware_path, brcmf_firmware_path,
BRCMF_FW_PATH_LEN, 0440);
@@ -46,6 +50,8 @@ enum nvram_parser_state {
* @column: current column in line.
* @pos: byte offset in input buffer.
* @entry: start position of key,value entry.
+ * @multi_dev_v1: detect pcie multi device v1 (compressed).
+ * @multi_dev_v2: detect pcie multi device v2.
*/
struct nvram_parser {
enum nvram_parser_state state;
@@ -56,8 +62,16 @@ struct nvram_parser {
u32 column;
u32 pos;
u32 entry;
+ bool multi_dev_v1;
+ bool multi_dev_v2;
};
+/**
+ * is_nvram_char() - check if char is a valid one for NVRAM entry
+ *
+ * It accepts all printable ASCII chars except for '#' which opens a comment.
+ * Note that ' ' (space), while accepted, is not valid in a key name.
+ */
static bool is_nvram_char(char c)
{
/* comment marker excluded */
@@ -65,7 +79,7 @@ static bool is_nvram_char(char c)
return false;
/* key and value may have any other readable character */
- return (c > 0x20 && c < 0x7f);
+ return (c >= 0x20 && c < 0x7f);
}
static bool is_whitespace(char c)
@@ -108,7 +122,11 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
st = COMMENT;
else
st = VALUE;
- } else if (!is_nvram_char(c)) {
+ if (strncmp(&nvp->fwnv->data[nvp->entry], "devpath", 7) == 0)
+ nvp->multi_dev_v1 = true;
+ if (strncmp(&nvp->fwnv->data[nvp->entry], "pcie/", 5) == 0)
+ nvp->multi_dev_v2 = true;
+ } else if (!is_nvram_char(c) || c == ' ') {
brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
nvp->line, nvp->column);
return COMMENT;
@@ -133,6 +151,8 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp)
ekv = (u8 *)&nvp->fwnv->data[nvp->pos];
skv = (u8 *)&nvp->fwnv->data[nvp->entry];
cplen = ekv - skv;
+ if (nvp->nvram_len + cplen + 1 >= BRCMF_FW_MAX_NVRAM_SIZE)
+ return END;
/* copy to output buffer */
memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
nvp->nvram_len += cplen;
@@ -148,17 +168,20 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp)
static enum nvram_parser_state
brcmf_nvram_handle_comment(struct nvram_parser *nvp)
{
- char *eol, *sol;
+ char *eoc, *sol;
sol = (char *)&nvp->fwnv->data[nvp->pos];
- eol = strchr(sol, '\n');
- if (eol == NULL)
- return END;
+ eoc = strchr(sol, '\n');
+ if (!eoc) {
+ eoc = strchr(sol, '\0');
+ if (!eoc)
+ return END;
+ }
/* eat all moving to next line */
nvp->line++;
nvp->column = 1;
- nvp->pos += (eol - sol) + 1;
+ nvp->pos += (eoc - sol) + 1;
return IDLE;
}
@@ -180,10 +203,18 @@ static enum nvram_parser_state
static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
const struct firmware *nv)
{
+ size_t size;
+
memset(nvp, 0, sizeof(*nvp));
nvp->fwnv = nv;
+ /* Limit size to MAX_NVRAM_SIZE; some files contain a lot of comments */
+ if (nv->size > BRCMF_FW_MAX_NVRAM_SIZE)
+ size = BRCMF_FW_MAX_NVRAM_SIZE;
+ else
+ size = nv->size;
/* Alloc for extra 0 byte + roundup by 4 + length field */
- nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
+ size += 1 + 3 + sizeof(u32);
+ nvp->nvram = kzalloc(size, GFP_KERNEL);
if (!nvp->nvram)
return -ENOMEM;
@@ -192,12 +223,141 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
return 0;
}
+/* brcmf_fw_strip_multi_v1: Some nvram files contain settings for multiple
+ * devices. Strip the file down to one device, using domain_nr/bus_nr to
+ * determine which data is to be returned. v1 is the version where nvram is
+ * stored compressed and "devpath" maps an index to the valid entries.
+ */
+static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
+ u16 bus_nr)
+{
+ /* Device path with a leading '=' key-value separator */
+ char pcie_path[] = "=pcie/?/?";
+ size_t pcie_len;
+
+ u32 i, j;
+ bool found;
+ u8 *nvram;
+ u8 id;
+
+ nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
+ if (!nvram)
+ goto fail;
+
+ /* min length: devpath0=pcie/1/4/ + 0:x=y */
+ if (nvp->nvram_len < BRCMF_FW_NVRAM_DEVPATH_LEN + 6)
+ goto fail;
+
+ /* First search for the devpathX and see if it is the configuration
+ * for domain_nr/bus_nr. Search the complete nvp.
+ */
+ snprintf(pcie_path, sizeof(pcie_path), "=pcie/%d/%d", domain_nr,
+ bus_nr);
+ pcie_len = strlen(pcie_path);
+ found = false;
+ i = 0;
+ while (i < nvp->nvram_len - BRCMF_FW_NVRAM_DEVPATH_LEN) {
+ /* Format: devpathX=pcie/Y/Z/
+ * Y = domain_nr, Z = bus_nr, X = virtual ID
+ */
+ if ((strncmp(&nvp->nvram[i], "devpath", 7) == 0) &&
+ (strncmp(&nvp->nvram[i + 8], pcie_path, pcie_len) == 0)) {
+ id = nvp->nvram[i + 7] - '0';
+ found = true;
+ break;
+ }
+ while (nvp->nvram[i] != 0)
+ i++;
+ i++;
+ }
+ if (!found)
+ goto fail;
+
+ /* Now copy all valid entries, release old nvram and assign new one */
+ i = 0;
+ j = 0;
+ while (i < nvp->nvram_len) {
+ if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
+ i += 2;
+ while (nvp->nvram[i] != 0) {
+ nvram[j] = nvp->nvram[i];
+ i++;
+ j++;
+ }
+ nvram[j] = 0;
+ j++;
+ }
+ while (nvp->nvram[i] != 0)
+ i++;
+ i++;
+ }
+ kfree(nvp->nvram);
+ nvp->nvram = nvram;
+ nvp->nvram_len = j;
+ return;
+
+fail:
+ kfree(nvram);
+ nvp->nvram_len = 0;
+}
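For illustration, a compressed (v1) multi-device file might contain the following (hypothetical values); for domain_nr 1 / bus_nr 4 the devpath0 line matches, so only the 0:-prefixed entries survive, with the prefix stripped:

	devpath0=pcie/1/4/
	devpath1=pcie/2/1/
	0:boardrev=0x1203
	0:macaddr=00:90:4c:c5:12:38
	1:boardrev=0x1305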
+
+/* brcmf_fw_strip_multi_v2: Some nvram files contain settings for multiple
+ * devices. Strip the file down to one device, using domain_nr/bus_nr to
+ * determine which data is to be returned. v2 is the version where nvram is
+ * stored uncompressed; all relevant valid entries are identified by the
+ * prefix pcie/domain_nr/bus_nr:
+ */
+static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
+ u16 bus_nr)
+{
+ char prefix[BRCMF_FW_NVRAM_PCIEDEV_LEN];
+ size_t len;
+ u32 i, j;
+ u8 *nvram;
+
+ nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
+ if (!nvram)
+ goto fail;
+
+ /* Copy all valid entries, release old nvram and assign new one.
+ * Valid entries are of type pcie/X/Y/ where X = domain_nr and
+ * Y = bus_nr.
+ */
+ snprintf(prefix, sizeof(prefix), "pcie/%d/%d/", domain_nr, bus_nr);
+ len = strlen(prefix);
+ i = 0;
+ j = 0;
+ while (i < nvp->nvram_len - len) {
+ if (strncmp(&nvp->nvram[i], prefix, len) == 0) {
+ i += len;
+ while (nvp->nvram[i] != 0) {
+ nvram[j] = nvp->nvram[i];
+ i++;
+ j++;
+ }
+ nvram[j] = 0;
+ j++;
+ }
+ while (nvp->nvram[i] != 0)
+ i++;
+ i++;
+ }
+ kfree(nvp->nvram);
+ nvp->nvram = nvram;
+ nvp->nvram_len = j;
+ return;
+fail:
+ kfree(nvram);
+ nvp->nvram_len = 0;
+}
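In the uncompressed (v2) layout the same information is spelled out per entry (hypothetical values); for domain_nr 1 / bus_nr 4 only the pcie/1/4/ lines are kept, again with the prefix stripped:

	pcie/1/4/boardrev=0x1203
	pcie/1/4/macaddr=00:90:4c:c5:12:38
	pcie/2/1/boardrev=0x1305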
+
/* brcmf_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
* and ending in a NUL. Removes carriage returns, empty lines, comment lines,
* and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
* End of buffer is completed with token identifying length of buffer.
*/
-static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
+static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length,
+ u16 domain_nr, u16 bus_nr)
{
struct nvram_parser nvp;
u32 pad;
@@ -212,6 +372,16 @@ static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
if (nvp.state == END)
break;
}
+ if (nvp.multi_dev_v1)
+ brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
+ else if (nvp.multi_dev_v2)
+ brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
+
+ if (nvp.nvram_len == 0) {
+ kfree(nvp.nvram);
+ return NULL;
+ }
+
pad = nvp.nvram_len;
*new_length = roundup(nvp.nvram_len + 1, 4);
while (pad != *new_length) {
@@ -239,6 +409,8 @@ struct brcmf_fw {
u16 flags;
const struct firmware *code;
const char *nvram_name;
+ u16 domain_nr;
+ u16 bus_nr;
void (*done)(struct device *dev, const struct firmware *fw,
void *nvram_image, u32 nvram_len);
};
@@ -254,7 +426,8 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
goto fail;
if (fw) {
- nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
+ nvram = brcmf_fw_nvram_strip(fw, &nvram_length,
+ fwctx->domain_nr, fwctx->bus_nr);
release_firmware(fw);
if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
goto fail;
@@ -309,11 +482,12 @@ fail:
kfree(fwctx);
}
-int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
- const char *code, const char *nvram,
- void (*fw_cb)(struct device *dev,
- const struct firmware *fw,
- void *nvram_image, u32 nvram_len))
+int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
+ const char *code, const char *nvram,
+ void (*fw_cb)(struct device *dev,
+ const struct firmware *fw,
+ void *nvram_image, u32 nvram_len),
+ u16 domain_nr, u16 bus_nr)
{
struct brcmf_fw *fwctx;
@@ -333,8 +507,21 @@ int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
fwctx->done = fw_cb;
if (flags & BRCMF_FW_REQUEST_NVRAM)
fwctx->nvram_name = nvram;
+ fwctx->domain_nr = domain_nr;
+ fwctx->bus_nr = bus_nr;
return request_firmware_nowait(THIS_MODULE, true, code, dev,
GFP_KERNEL, fwctx,
brcmf_fw_request_code_done);
}
+
+int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+ const char *code, const char *nvram,
+ void (*fw_cb)(struct device *dev,
+ const struct firmware *fw,
+ void *nvram_image, u32 nvram_len))
+{
+ return brcmf_fw_get_firmwares_pcie(dev, flags, code, nvram, fw_cb, 0,
+ 0);
+}
+
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
index 4d3482356b77..604dd48ab4e0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
@@ -32,6 +32,12 @@ void brcmf_fw_nvram_free(void *nvram);
* fails it will not use the callback, but call device_release_driver()
* instead which will call the driver .remove() callback.
*/
+int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
+ const char *code, const char *nvram,
+ void (*fw_cb)(struct device *dev,
+ const struct firmware *fw,
+ void *nvram_image, u32 nvram_len),
+ u16 domain_nr, u16 bus_nr);
int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
const char *code, const char *nvram,
void (*fw_cb)(struct device *dev,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index eb1325371d3a..59440631fec5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -249,8 +249,8 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
}
-void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
- struct sk_buff *skb)
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+ struct sk_buff *skb)
{
struct brcmf_flowring_ring *ring;
@@ -271,6 +271,7 @@ void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
brcmf_flowring_block(flow, flowid, false);
}
+ return skb_queue_len(&ring->skblist);
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
index a34cd394c616..5551861a44bc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
@@ -64,8 +64,8 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
-void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
- struct sk_buff *skb);
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+ struct sk_buff *skb);
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index f0dda0ecd23b..5017eaa4af45 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -635,7 +635,7 @@ static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
return 0;
}
-static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
+static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
u32 slot_id, struct sk_buff **pktout,
bool remove_item)
{
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 65efb1468988..1b47de067d25 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -73,7 +73,7 @@
#define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2 96
-#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 64
+#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 96
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
struct msgbuf_common_hdr {
@@ -278,16 +278,6 @@ struct brcmf_msgbuf_pktids {
struct brcmf_msgbuf_pktid *array;
};
-
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
@@ -462,7 +452,6 @@ static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
memcpy(msgbuf->ioctbuf, buf, buf_len);
else
memset(msgbuf->ioctbuf, 0, buf_len);
- brcmf_dma_flush(ioctl_buf, buf_len);
err = brcmf_commonring_write_complete(commonring);
brcmf_commonring_unlock(commonring);
@@ -795,6 +784,8 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
struct brcmf_flowring *flow = msgbuf->flow;
struct ethhdr *eh = (struct ethhdr *)(skb->data);
u32 flowid;
+ u32 queue_count;
+ bool force;
flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
if (flowid == BRCMF_FLOWRING_INVALID_ID) {
@@ -802,8 +793,9 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
if (flowid == BRCMF_FLOWRING_INVALID_ID)
return -ENOMEM;
}
- brcmf_flowring_enqueue(flow, flowid, skb);
- brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false);
+ queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
+ force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
+ brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
return 0;
}
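
brcmf_flowring_enqueue() now reports the queue depth, and the modulo test above uses it to force-schedule the TX worker on every 32nd queued packet instead of always taking the delayed path, capping worst-case queueing latency. A trivial standalone sketch of when the force path fires (harness only, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define TRICKLE_THRS 32   /* mirrors BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS */

int main(void)
{
    for (unsigned queue_count = 1; queue_count <= 96; queue_count++) {
        bool force = (queue_count % TRICKLE_THRS) == 0;

        if (force)
            printf("depth %u: force-schedule the TX worker\n", queue_count);
    }
    return 0;   /* fires at depths 32, 64 and 96 */
}
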
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c
index c824570ddea3..03f35e0c52ca 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c
@@ -39,10 +39,16 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
if (!sdiodev->pdata)
return;
+ if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
+ sdiodev->pdata->drive_strength = val;
+
+ /* make sure there are interrupts defined in the node */
+ if (!of_find_property(np, "interrupts", NULL))
+ return;
+
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
brcmf_err("interrupt could not be mapped\n");
- devm_kfree(dev, sdiodev->pdata);
return;
}
irqf = irqd_get_trigger_type(irq_get_irq_data(irq));
@@ -50,7 +56,4 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
sdiodev->pdata->oob_irq_supported = true;
sdiodev->pdata->oob_irq_nr = irq;
sdiodev->pdata->oob_irq_flags = irqf;
-
- if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
- sdiodev->pdata->drive_strength = val;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 1831ecd0813e..37a2624d7bba 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -51,6 +51,8 @@ enum brcmf_pcie_state {
#define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt"
#define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin"
#define BRCMF_PCIE_43570_NVRAM_NAME "brcm/brcmfmac43570-pcie.txt"
+#define BRCMF_PCIE_4358_FW_NAME "brcm/brcmfmac4358-pcie.bin"
+#define BRCMF_PCIE_4358_NVRAM_NAME "brcm/brcmfmac4358-pcie.txt"
#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
@@ -110,10 +112,11 @@ enum brcmf_pcie_state {
BRCMF_PCIE_MB_INT_D2H3_DB0 | \
BRCMF_PCIE_MB_INT_D2H3_DB1)
-#define BRCMF_PCIE_MIN_SHARED_VERSION 4
+#define BRCMF_PCIE_MIN_SHARED_VERSION 5
#define BRCMF_PCIE_MAX_SHARED_VERSION 5
#define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
-#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT 0x4000
+#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
+#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
#define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
@@ -145,6 +148,10 @@ enum brcmf_pcie_state {
#define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
#define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
#define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
+#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20
+#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28
+#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36
+#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44
#define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
#define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52
@@ -189,6 +196,8 @@ MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME);
struct brcmf_pcie_console {
@@ -244,6 +253,13 @@ struct brcmf_pciedev_info {
bool mbdata_completed;
bool irq_allocated;
bool wowl_enabled;
+ u8 dma_idx_sz;
+ void *idxbuf;
+ u32 idxbuf_sz;
+ dma_addr_t idxbuf_dmahandle;
+ u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
+ void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+ u16 value);
};
struct brcmf_pcie_ringbuf {
@@ -273,15 +289,6 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
};
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
static u32
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
{
@@ -329,6 +336,25 @@ brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
}
+static u16
+brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+ u16 *address = devinfo->idxbuf + mem_offset;
+
+ return (*(address));
+}
+
+
+static void
+brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+ u16 value)
+{
+ u16 *address = devinfo->idxbuf + mem_offset;
+
+ *(address) = value;
+}
+
+
static u32
brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
@@ -874,7 +900,7 @@ static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
commonring->w_ptr, ring->id);
- brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);
+ devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
return 0;
}
@@ -892,7 +918,7 @@ static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
commonring->r_ptr, ring->id);
- brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);
+ devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
return 0;
}
@@ -921,7 +947,7 @@ static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
return -EIO;
- commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr);
+ commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
commonring->w_ptr, ring->id);
@@ -939,7 +965,7 @@ static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
return -EIO;
- commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);
+ commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
commonring->r_ptr, ring->id);
@@ -1044,6 +1070,13 @@ static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
}
kfree(devinfo->shared.flowrings);
devinfo->shared.flowrings = NULL;
+ if (devinfo->idxbuf) {
+ dma_free_coherent(&devinfo->pdev->dev,
+ devinfo->idxbuf_sz,
+ devinfo->idxbuf,
+ devinfo->idxbuf_dmahandle);
+ devinfo->idxbuf = NULL;
+ }
}
@@ -1059,19 +1092,72 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
u32 addr;
u32 ring_mem_ptr;
u32 i;
+ u64 address;
+ u32 bufsz;
u16 max_sub_queues;
+ u8 idx_offset;
ring_addr = devinfo->shared.ring_info_addr;
brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
+ addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
+ max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
+
+ if (devinfo->dma_idx_sz != 0) {
+ bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
+ devinfo->dma_idx_sz * 2;
+ devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
+ &devinfo->idxbuf_dmahandle,
+ GFP_KERNEL);
+ if (!devinfo->idxbuf)
+ devinfo->dma_idx_sz = 0;
+ }
- addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
- d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
- addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
- d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
- addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
- h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
- addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
- h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ if (devinfo->dma_idx_sz == 0) {
+ addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
+ d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
+ d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
+ h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
+ h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+ idx_offset = sizeof(u32);
+ devinfo->write_ptr = brcmf_pcie_write_tcm16;
+ devinfo->read_ptr = brcmf_pcie_read_tcm16;
+ brcmf_dbg(PCIE, "Using TCM indices\n");
+ } else {
+ memset(devinfo->idxbuf, 0, bufsz);
+ devinfo->idxbuf_sz = bufsz;
+ idx_offset = devinfo->dma_idx_sz;
+ devinfo->write_ptr = brcmf_pcie_write_idx;
+ devinfo->read_ptr = brcmf_pcie_read_idx;
+
+ h2d_w_idx_ptr = 0;
+ addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
+ address = (u64)devinfo->idxbuf_dmahandle;
+ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+ h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
+ addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
+ address += max_sub_queues * idx_offset;
+ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+ d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
+ addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
+ address += max_sub_queues * idx_offset;
+ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+ d2h_r_idx_ptr = d2h_w_idx_ptr +
+ BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
+ addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
+ address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
+ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+ brcmf_dbg(PCIE, "Using host memory indices\n");
+ }
addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
@@ -1085,8 +1171,8 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring->id = i;
devinfo->shared.commonrings[i] = ring;
- h2d_w_idx_ptr += sizeof(u32);
- h2d_r_idx_ptr += sizeof(u32);
+ h2d_w_idx_ptr += idx_offset;
+ h2d_r_idx_ptr += idx_offset;
ring_mem_ptr += BRCMF_RING_MEM_SZ;
}
@@ -1100,13 +1186,11 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring->id = i;
devinfo->shared.commonrings[i] = ring;
- d2h_w_idx_ptr += sizeof(u32);
- d2h_r_idx_ptr += sizeof(u32);
+ d2h_w_idx_ptr += idx_offset;
+ d2h_r_idx_ptr += idx_offset;
ring_mem_ptr += BRCMF_RING_MEM_SZ;
}
- addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
- max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
devinfo->shared.nrof_flowrings =
max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
@@ -1130,15 +1214,15 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
ring);
ring->w_idx_addr = h2d_w_idx_ptr;
ring->r_idx_addr = h2d_r_idx_ptr;
- h2d_w_idx_ptr += sizeof(u32);
- h2d_r_idx_ptr += sizeof(u32);
+ h2d_w_idx_ptr += idx_offset;
+ h2d_r_idx_ptr += idx_offset;
}
devinfo->shared.flowrings = rings;
return 0;
fail:
- brcmf_err("Allocating commonring buffers failed\n");
+ brcmf_err("Allocating ring buffers failed\n");
brcmf_pcie_release_ringbuffers(devinfo);
return -ENOMEM;
}
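
A standalone sketch of the host-memory index layout that brcmf_pcie_init_ringbuffers() sets up above, assuming the example values shown (52 submission queues, 2-byte indices; NROF_D2H_RINGS stands in for BRCMF_NROF_D2H_COMMON_MSGRINGS, assumed to be 3): four back-to-back blocks hold the H2D write, H2D read, D2H write and D2H read pointers, and the device DMAs index updates into them instead of the host reading TCM on every ring update.

#include <stdio.h>

#define NROF_D2H_RINGS 3   /* stands in for BRCMF_NROF_D2H_COMMON_MSGRINGS */

int main(void)
{
    unsigned max_sub_queues = 52;  /* example; really read from TCM */
    unsigned idx_sz = 2;           /* sizeof(u16) for 2-byte indices */
    unsigned h2d_w = 0;
    unsigned h2d_r = h2d_w + max_sub_queues * idx_sz;
    unsigned d2h_w = h2d_r + max_sub_queues * idx_sz;
    unsigned d2h_r = d2h_w + NROF_D2H_RINGS * idx_sz;
    unsigned total = (NROF_D2H_RINGS + max_sub_queues) * idx_sz * 2;

    printf("h2d_w=%u h2d_r=%u d2h_w=%u d2h_r=%u total=%u\n",
           h2d_w, h2d_r, d2h_w, d2h_r, total);
    return 0;   /* h2d_w=0 h2d_r=104 d2h_w=208 d2h_r=214 total=220 */
}
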
@@ -1171,7 +1255,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
goto fail;
memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
- brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
@@ -1189,7 +1272,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
goto fail;
memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
- brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
@@ -1276,10 +1358,13 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
brcmf_err("Unsupported PCIE version %d\n", version);
return -EINVAL;
}
- if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
- brcmf_err("Unsupported legacy TX mode 0x%x\n",
- shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
- return -EINVAL;
+
+ /* check whether firmware supports DMA indices */
+ if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
+ if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
+ devinfo->dma_idx_sz = sizeof(u16);
+ else
+ devinfo->dma_idx_sz = sizeof(u32);
}
addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
@@ -1333,6 +1418,10 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
fw_name = BRCMF_PCIE_43570_FW_NAME;
nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
break;
+ case BRCM_CC_4358_CHIP_ID:
+ fw_name = BRCMF_PCIE_4358_FW_NAME;
+ nvram_name = BRCMF_PCIE_4358_NVRAM_NAME;
+ break;
default:
brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
return -ENODEV;
@@ -1609,7 +1698,7 @@ static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
bus->msgbuf->commonrings[i] =
&devinfo->shared.commonrings[i]->commonring;
- flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings),
+ flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
GFP_KERNEL);
if (!flowrings)
goto fail;
@@ -1641,8 +1730,13 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct brcmf_pciedev_info *devinfo;
struct brcmf_pciedev *pcie_bus_dev;
struct brcmf_bus *bus;
+ u16 domain_nr;
+ u16 bus_nr;
- brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
+ domain_nr = pci_domain_nr(pdev->bus) + 1;
+ bus_nr = pdev->bus->number;
+ brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
+ domain_nr, bus_nr);
ret = -ENOMEM;
devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
@@ -1691,10 +1785,10 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto fail_bus;
- ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
- BRCMF_FW_REQ_NV_OPTIONAL,
- devinfo->fw_name, devinfo->nvram_name,
- brcmf_pcie_setup);
+ ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
+ BRCMF_FW_REQ_NV_OPTIONAL,
+ devinfo->fw_name, devinfo->nvram_name,
+ brcmf_pcie_setup, domain_nr, bus_nr);
if (ret == 0)
return 0;
fail_bus:
@@ -1850,9 +1944,11 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
{ /* end: all zeroes */ }
};
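
Tying the probe parameters above back to the NVRAM stripping: a sketch, assuming a hypothetical device in PCI domain 0 on bus 1, of how the 1-based domain number selects the prefix that brcmf_fw_strip_multi_v1/v2 will keep.

#include <stdio.h>

int main(void)
{
    unsigned pci_domain = 0, pci_bus = 1;  /* hypothetical device */
    unsigned domain_nr = pci_domain + 1;   /* 1-based, as in the probe */
    char prefix[32];

    snprintf(prefix, sizeof(prefix), "pcie/%u/%u/", domain_nr, pci_bus);
    printf("%s\n", prefix);                /* -> pcie/1/1/ */
    return 0;
}
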
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index ab0c89833013..bf7a8b1ad914 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -601,6 +601,8 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
#define BCM43241B0_NVRAM_NAME "brcm/brcmfmac43241b0-sdio.txt"
#define BCM43241B4_FIRMWARE_NAME "brcm/brcmfmac43241b4-sdio.bin"
#define BCM43241B4_NVRAM_NAME "brcm/brcmfmac43241b4-sdio.txt"
+#define BCM43241B5_FIRMWARE_NAME "brcm/brcmfmac43241b5-sdio.bin"
+#define BCM43241B5_NVRAM_NAME "brcm/brcmfmac43241b5-sdio.txt"
#define BCM4329_FIRMWARE_NAME "brcm/brcmfmac4329-sdio.bin"
#define BCM4329_NVRAM_NAME "brcm/brcmfmac4329-sdio.txt"
#define BCM4330_FIRMWARE_NAME "brcm/brcmfmac4330-sdio.bin"
@@ -628,6 +630,8 @@ MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B5_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B5_NVRAM_NAME);
MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
@@ -667,7 +671,8 @@ enum brcmf_firmware_type {
static const struct brcmf_firmware_names brcmf_fwname_data[] = {
{ BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
{ BRCM_CC_43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
- { BRCM_CC_43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+ { BRCM_CC_43241_CHIP_ID, 0x00000020, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+ { BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43241B5) },
{ BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
{ BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
{ BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
@@ -3550,10 +3555,6 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
return;
}
- if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
- brcmf_err("bus is down. we have nothing to do\n");
- return;
- }
/* Count the interrupt call */
bus->sdcnt.intrcount++;
if (in_interrupt())
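
The mask split in the brcmf_fwname_data table above works because each entry's second field is a chip-revision bitmask: bit N covers revision N, so 0x0000001F matches revs 0-4 (B0), 0x00000020 matches rev 5 only (B4), and 0xFFFFFFC0 matches revs 6 and up (B5). A standalone sketch, assuming the driver matches with BIT(chiprev) (the helper and table here are illustrative):

#include <stdio.h>
#include <stdint.h>

static const char *fw_for_43241(unsigned chiprev)
{
    static const struct { uint32_t revmsk; const char *name; } tbl[] = {
        { 0x0000001F, "brcmfmac43241b0-sdio.bin" },
        { 0x00000020, "brcmfmac43241b4-sdio.bin" },
        { 0xFFFFFFC0, "brcmfmac43241b5-sdio.bin" },
    };

    for (unsigned i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
        if (tbl[i].revmsk & (1u << chiprev))
            return tbl[i].name;
    return NULL;
}

int main(void)
{
    printf("rev 4 -> %s\n", fw_for_43241(4));  /* b0 image */
    printf("rev 5 -> %s\n", fw_for_43241(5));  /* b4 image */
    printf("rev 6 -> %s\n", fw_for_43241(6));  /* b5 image */
    return 0;
}
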
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 5df6aa72cc2d..daba86d881bc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1270,8 +1270,13 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->chiprev = bus_pub->chiprev;
/* request firmware here */
- brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
- brcmf_usb_probe_phase2);
+ ret = brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo),
+ NULL, brcmf_usb_probe_phase2);
+ if (ret) {
+ brcmf_err("firmware request failed: %d\n", ret);
+ goto fail;
+ }
+
return 0;
fail:
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 48135063347e..ab775a5d5b33 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -41,8 +41,7 @@
#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
/* Flags we support */
-#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+#define MAC_FILTERS (FIF_ALLMULTI | \
FIF_FCSFAIL | \
FIF_CONTROL | \
FIF_OTHER_BSS | \
@@ -743,8 +742,6 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
changed_flags &= MAC_FILTERS;
*total_flags &= MAC_FILTERS;
- if (changed_flags & FIF_PROMISC_IN_BSS)
- brcms_dbg_info(core, "FIF_PROMISC_IN_BSS\n");
if (changed_flags & FIF_ALLMULTI)
brcms_dbg_info(core, "FIF_ALLMULTI\n");
if (changed_flags & FIF_FCSFAIL)
@@ -1063,10 +1060,9 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
*/
static int ieee_hw_init(struct ieee80211_hw *hw)
{
- hw->flags = IEEE80211_HW_SIGNAL_DBM
- /* | IEEE80211_HW_CONNECTION_MONITOR What is this? */
- | IEEE80211_HW_REPORTS_TX_ACK_STATUS
- | IEEE80211_HW_AMPDU_AGGREGATION;
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
hw->extra_tx_headroom = brcms_c_get_header_len();
hw->queues = N_TX_QUEUES;
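
The hw->flags conversions above (and in the drivers that follow) are mechanical: mac80211 replaced the single u32 flags word, which was running out of bits, with an unsigned long bitmap, and ieee80211_hw_set() sets one bit in it. A standalone sketch of the idea, assuming hypothetical flag numbers and simplified macros (not the actual mac80211 definitions):

#include <stdio.h>

#define BPL (8 * sizeof(unsigned long))

enum hw_flag { HW_SIGNAL_DBM = 1, HW_AMPDU_AGGREGATION = 9, NUM_HW_FLAGS = 64 };

struct hw { unsigned long flags[(NUM_HW_FLAGS + BPL - 1) / BPL]; };

#define hw_set(hw, flg)   ((hw)->flags[(flg) / BPL] |= 1UL << ((flg) % BPL))
#define hw_check(hw, flg) (!!((hw)->flags[(flg) / BPL] & (1UL << ((flg) % BPL))))

int main(void)
{
    struct hw hw = { { 0 } };

    hw_set(&hw, HW_AMPDU_AGGREGATION);
    printf("ampdu=%d dbm=%d\n", hw_check(&hw, HW_AMPDU_AGGREGATION),
           hw_check(&hw, HW_SIGNAL_DBM));  /* ampdu=1 dbm=0 */
    return 0;
}
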
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 369527e27689..9728be0e704b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -3571,7 +3571,7 @@ void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
wlc->filter_flags = filter_flags;
- if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
+ if (filter_flags & FIF_OTHER_BSS)
promisc_bits |= MCTL_PROMISC;
if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 4efdd51af9c8..7a6daa37dc6b 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -45,6 +45,7 @@
#define BRCM_CC_43567_CHIP_ID 43567
#define BRCM_CC_43569_CHIP_ID 43569
#define BRCM_CC_43570_CHIP_ID 43570
+#define BRCM_CC_4358_CHIP_ID 0x4358
#define BRCM_CC_43602_CHIP_ID 43602
/* USB Device IDs */
@@ -59,9 +60,11 @@
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
+#define BRCM_PCIE_4358_DEVICE_ID 0x43e9
#define BRCM_PCIE_43602_DEVICE_ID 0x43ba
#define BRCM_PCIE_43602_2G_DEVICE_ID 0x43bb
#define BRCM_PCIE_43602_5G_DEVICE_ID 0x43bc
+#define BRCM_PCIE_43602_RAW_DEVICE_ID 43602
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
index 3689dbbd10bd..0e51e27d2e3f 100644
--- a/drivers/net/wireless/cw1200/main.c
+++ b/drivers/net/wireless/cw1200/main.c
@@ -278,14 +278,14 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
else
priv->ba_tx_tid_mask = 0xff; /* Enable TX BLKACK for all TIDs */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
- IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC;
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index b0f65fa09428..b86500b4418f 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -578,13 +578,11 @@ void cw1200_configure_filter(struct ieee80211_hw *dev,
{
struct cw1200_common *priv = dev->priv;
bool listening = !!(*total_flags &
- (FIF_PROMISC_IN_BSS |
- FIF_OTHER_BSS |
+ (FIF_OTHER_BSS |
FIF_BCN_PRBRESP_PROMISC |
FIF_PROBE_REQ));
- *total_flags &= FIF_PROMISC_IN_BSS |
- FIF_OTHER_BSS |
+ *total_flags &= FIF_OTHER_BSS |
FIF_FCSFAIL |
FIF_BCN_PRBRESP_PROMISC |
FIF_PROBE_REQ;
@@ -592,14 +590,12 @@ void cw1200_configure_filter(struct ieee80211_hw *dev,
down(&priv->scan.lock);
mutex_lock(&priv->conf_mutex);
- priv->rx_filter.promiscuous = (*total_flags & FIF_PROMISC_IN_BSS)
- ? 1 : 0;
+ priv->rx_filter.promiscuous = 0;
priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS |
FIF_PROBE_REQ)) ? 1 : 0;
priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 1 : 0;
priv->disable_beacon_filter = !(*total_flags &
(FIF_BCN_PRBRESP_PROMISC |
- FIF_PROMISC_IN_BSS |
FIF_PROBE_REQ));
if (priv->listening != listening) {
priv->listening = listening;
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index e5665804d986..7f4cb692cc57 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3048,7 +3048,7 @@ il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
*total_flags);
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -3074,7 +3074,7 @@ il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
* filters into the device.
*/
*total_flags &=
- FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_OTHER_BSS | FIF_ALLMULTI |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
@@ -3561,8 +3561,10 @@ il3945_setup_mac(struct il_priv *il)
hw->vif_data_size = sizeof(struct il_vif_priv);
/* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 976f65fe9c38..44fa422f255e 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5751,11 +5751,13 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
hw->rate_control_algorithm = "iwl-4965-rs";
/* Tell mac80211 our characteristics */
- hw->flags =
- IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
if (il->cfg->sku & IL_SKU_N)
hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
NL80211_FEATURE_STATIC_SMPS;
@@ -6166,7 +6168,7 @@ il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
*total_flags);
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -6192,7 +6194,7 @@ il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
* filters into the device.
*/
*total_flags &=
- FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_OTHER_BSS | FIF_ALLMULTI |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index f89f446e5c8a..aba095761ac6 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -21,6 +21,7 @@ config IWLWIFI
Intel 7260 Wi-Fi Adapter
Intel 3160 Wi-Fi Adapter
Intel 7265 Wi-Fi Adapter
+ Intel 8260 Wi-Fi Adapter
Intel 3165 Wi-Fi Adapter
@@ -54,16 +55,17 @@ config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
default IWLWIFI
help
- This is the driver that supports the DVM firmware which is
- used by most existing devices (with the exception of 7260
- and 3160).
+ This is the driver that supports the DVM firmware. The list
+ of the devices that use this firmware is available here:
+ https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
select WANT_DEV_COREDUMP
help
- This is the driver that supports the MVM firmware which is
- currently only available for 7260 and 3160 devices.
+ This is the driver that supports the MVM firmware. The list
+ of the devices that use this firmware is available here:
+ https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
# don't call it _MODULE -- will confuse Kconfig/fixdep/...
config IWLWIFI_OPMODE_MODULAR
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 3d32f4120174..dbfc5b18bcb7 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -9,6 +9,7 @@ iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
+iwlwifi-objs += iwl-trans.o
iwlwifi-objs += $(iwlwifi-m)
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 5abd62ed8cb4..7acaa266b704 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -104,15 +104,16 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->rate_control_algorithm = "iwl-agn-rs";
/* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_WANT_MONITOR_VIF;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, QUEUE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
@@ -135,7 +136,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
*/
if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
!iwlwifi_mod_params.sw_crypto)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->sta_data_size = sizeof(struct iwl_station_priv);
hw->vif_data_size = sizeof(struct iwl_vif_priv);
@@ -1061,7 +1062,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
changed_flags, *total_flags);
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -1088,7 +1089,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
* since we currently do not support programming multicast
* filters into the device.
*/
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
@@ -1140,7 +1141,6 @@ static void iwlagn_mac_event_callback(struct ieee80211_hw *hw,
return;
IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->mutex);
if (priv->lib->bt_params &&
priv->lib->bt_params->advanced_bt_coexist) {
@@ -1149,13 +1149,12 @@ static void iwlagn_mac_event_callback(struct ieee80211_hw *hw,
else if (event->u.rssi.data == RSSI_EVENT_HIGH)
priv->bt_enable_pspoll = false;
- iwlagn_send_advance_bt_config(priv);
+ queue_work(priv->workqueue, &priv->bt_runtime_config);
} else {
IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
"ignoring RSSI callback\n");
}
- mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -1343,9 +1342,9 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
* other interfaces are added, this is safe.
*/
if (vif->type == NL80211_IFTYPE_MONITOR)
- priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+ ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
else
- priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, priv->hw->flags);
err = iwl_setup_interface(priv, ctx);
if (!err || reset)
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 74ad278116be..cc35f796d406 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 13
+#define IWL7260_UCODE_API_MAX 15
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 12
@@ -124,6 +124,28 @@ static const struct iwl_base_params iwl7000_base_params = {
.apmg_wake_up_wa = true,
};
+static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
+ .ct_kill_entry = 118,
+ .ct_kill_exit = 96,
+ .ct_kill_duration = 5,
+ .dynamic_smps_entry = 114,
+ .dynamic_smps_exit = 110,
+ .tx_protection_entry = 114,
+ .tx_protection_exit = 108,
+ .tx_backoff = {
+ {.temperature = 112, .backoff = 300},
+ {.temperature = 113, .backoff = 800},
+ {.temperature = 114, .backoff = 1500},
+ {.temperature = 115, .backoff = 3000},
+ {.temperature = 116, .backoff = 5000},
+ {.temperature = 117, .backoff = 10000},
+ },
+ .support_ct_kill = true,
+ .support_dynamic_smps = true,
+ .support_tx_protection = true,
+ .support_tx_backoff = true,
+};
+
static const struct iwl_ht_params iwl7000_ht_params = {
.stbc = true,
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
@@ -166,6 +188,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
.dccm_len = IWL7260_DCCM_LEN,
+ .thermal_params = &iwl7000_high_temp_tt_params,
};
const struct iwl_cfg iwl7260_2n_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index ce6321b7d241..72040cd0b979 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 13
+#define IWL8000_UCODE_API_MAX 15
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 12
@@ -122,24 +122,49 @@ static const struct iwl_ht_params iwl8000_ht_params = {
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
-#define IWL_DEVICE_8000 \
- .ucode_api_max = IWL8000_UCODE_API_MAX, \
- .ucode_api_ok = IWL8000_UCODE_API_OK, \
- .ucode_api_min = IWL8000_UCODE_API_MIN, \
- .device_family = IWL_DEVICE_FAMILY_8000, \
- .max_inst_size = IWL60_RTC_INST_SIZE, \
- .max_data_size = IWL60_RTC_DATA_SIZE, \
- .base_params = &iwl8000_base_params, \
- .led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
- .d0i3 = true, \
- .non_shared_ant = ANT_A, \
- .dccm_offset = IWL8260_DCCM_OFFSET, \
- .dccm_len = IWL8260_DCCM_LEN, \
- .dccm2_offset = IWL8260_DCCM2_OFFSET, \
- .dccm2_len = IWL8260_DCCM2_LEN, \
- .smem_offset = IWL8260_SMEM_OFFSET, \
- .smem_len = IWL8260_SMEM_LEN
+static const struct iwl_tt_params iwl8000_tt_params = {
+ .ct_kill_entry = 115,
+ .ct_kill_exit = 93,
+ .ct_kill_duration = 5,
+ .dynamic_smps_entry = 111,
+ .dynamic_smps_exit = 107,
+ .tx_protection_entry = 112,
+ .tx_protection_exit = 105,
+ .tx_backoff = {
+ {.temperature = 110, .backoff = 200},
+ {.temperature = 111, .backoff = 600},
+ {.temperature = 112, .backoff = 1200},
+ {.temperature = 113, .backoff = 2000},
+ {.temperature = 114, .backoff = 4000},
+ },
+ .support_ct_kill = true,
+ .support_dynamic_smps = true,
+ .support_tx_protection = true,
+ .support_tx_backoff = true,
+};
+
+#define IWL_DEVICE_8000 \
+ .ucode_api_max = IWL8000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL8000_UCODE_API_OK, \
+ .ucode_api_min = IWL8000_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_8000, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .base_params = &iwl8000_base_params, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
+ .d0i3 = true, \
+ .non_shared_ant = ANT_A, \
+ .dccm_offset = IWL8260_DCCM_OFFSET, \
+ .dccm_len = IWL8260_DCCM_LEN, \
+ .dccm2_offset = IWL8260_DCCM2_OFFSET, \
+ .dccm2_len = IWL8260_DCCM2_LEN, \
+ .smem_offset = IWL8260_SMEM_OFFSET, \
+ .smem_len = IWL8260_SMEM_LEN, \
+ .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \
+ .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
+ .thermal_params = &iwl8000_tt_params, \
+ .apmg_not_supported = true
const struct iwl_cfg iwl8260_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 8260",
@@ -177,8 +202,6 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
- .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
- .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.disable_dummy_notification = true,
.max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
@@ -192,8 +215,6 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
- .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
- .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
.bt_shared_single_ant = true,
.disable_dummy_notification = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 3f33f753ce2f..08c14afeb148 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -195,6 +195,49 @@ struct iwl_ht_params {
};
/*
+ * Tx-backoff threshold
+ * @temperature: The threshold in Celsius
+ * @backoff: The tx-backoff in uSec
+ */
+struct iwl_tt_tx_backoff {
+ s32 temperature;
+ u32 backoff;
+};
+
+#define TT_TX_BACKOFF_SIZE 6
+
+/**
+ * struct iwl_tt_params - thermal throttling parameters
+ * @ct_kill_entry: CT Kill entry threshold
+ * @ct_kill_exit: CT Kill exit threshold
+ * @ct_kill_duration: The time interval (in uSec) at which the driver needs
+ * to check whether to exit CT Kill.
+ * @dynamic_smps_entry: Dynamic SMPS entry threshold
+ * @dynamic_smps_exit: Dynamic SMPS exit threshold
+ * @tx_protection_entry: TX protection entry threshold
+ * @tx_protection_exit: TX protection exit threshold
+ * @tx_backoff: Array of thresholds for tx-backoff, in ascending order.
+ * @support_ct_kill: Support CT Kill?
+ * @support_dynamic_smps: Support dynamic SMPS?
+ * @support_tx_protection: Support tx protection?
+ * @support_tx_backoff: Support tx-backoff?
+ */
+struct iwl_tt_params {
+ s32 ct_kill_entry;
+ s32 ct_kill_exit;
+ u32 ct_kill_duration;
+ s32 dynamic_smps_entry;
+ s32 dynamic_smps_exit;
+ s32 tx_protection_entry;
+ s32 tx_protection_exit;
+ struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
+ bool support_ct_kill;
+ bool support_dynamic_smps;
+ bool support_tx_protection;
+ bool support_tx_backoff;
+};
+
+/*
* information on how to parse the EEPROM
*/
#define EEPROM_REG_BAND_1_CHANNELS 0x08
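
A sketch of how an ascending tx_backoff table like those above can be consumed, assuming a simple step function (the exact policy the driver applies is not shown in this hunk): take the backoff of the highest threshold not exceeding the current temperature.

#include <stdio.h>
#include <stdint.h>

struct tt_tx_backoff { int32_t temperature; uint32_t backoff; };

static uint32_t backoff_for(const struct tt_tx_backoff *tbl, unsigned n,
                            int32_t temp)
{
    uint32_t backoff = 0;   /* below the first threshold: no backoff */

    for (unsigned i = 0; i < n && tbl[i].temperature <= temp; i++)
        backoff = tbl[i].backoff;
    return backoff;
}

int main(void)
{
    /* the iwl8000_tt_params.tx_backoff entries from the hunk above */
    static const struct tt_tx_backoff tbl[] = {
        { 110, 200 }, { 111, 600 }, { 112, 1200 },
        { 113, 2000 }, { 114, 4000 },
    };

    printf("%u %u %u\n", backoff_for(tbl, 5, 109),
           backoff_for(tbl, 5, 112), backoff_for(tbl, 5, 120));
    return 0;   /* 0 1200 4000 */
}
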
@@ -316,6 +359,8 @@ struct iwl_cfg {
const u32 dccm2_len;
const u32 smem_offset;
const u32 smem_len;
+ const struct iwl_tt_params *thermal_params;
+ bool apmg_not_supported;
};
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
index 223b8752f924..948ce0802fa7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -64,19 +65,21 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
TRACE_EVENT(iwlwifi_dev_rx,
TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
- void *rxbuf, size_t len),
- TP_ARGS(dev, trans, rxbuf, len),
+ struct iwl_rx_packet *pkt, size_t len),
+ TP_ARGS(dev, trans, pkt, len),
TP_STRUCT__entry(
DEV_ENTRY
- __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
+ __field(u8, cmd)
+ __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
),
TP_fast_assign(
DEV_ASSIGN;
- memcpy(__get_dynamic_array(rxbuf), rxbuf,
- iwl_rx_trace_len(trans, rxbuf, len));
+ __entry->cmd = pkt->hdr.cmd;
+ memcpy(__get_dynamic_array(rxbuf), pkt,
+ iwl_rx_trace_len(trans, pkt, len));
),
TP_printk("[%s] RX cmd %#.2x",
- __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
+ __get_str(dev), __entry->cmd)
);
TRACE_EVENT(iwlwifi_dev_tx,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 7267152e7dc7..6685259927f8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -423,13 +423,19 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
{
const struct iwl_ucode_api *ucode_api = (void *)data;
u32 api_index = le32_to_cpu(ucode_api->api_index);
+ u32 api_flags = le32_to_cpu(ucode_api->api_flags);
+ int i;
- if (api_index >= IWL_API_ARRAY_SIZE) {
+ if (api_index >= IWL_API_MAX_BITS / 32) {
IWL_ERR(drv, "api_index larger than supported by driver\n");
- return -EINVAL;
+ /* don't return an error so we can load FW that has more bits */
+ return 0;
}
- capa->api[api_index] = le32_to_cpu(ucode_api->api_flags);
+ for (i = 0; i < 32; i++) {
+ if (api_flags & BIT(i))
+ __set_bit(i + 32 * api_index, capa->_api);
+ }
return 0;
}
@@ -439,13 +445,19 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
{
const struct iwl_ucode_capa *ucode_capa = (void *)data;
u32 api_index = le32_to_cpu(ucode_capa->api_index);
+ u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
+ int i;
- if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) {
+ if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) {
IWL_ERR(drv, "api_index larger than supported by driver\n");
- return -EINVAL;
+ /* don't return an error so we can load FW that has more bits */
+ return 0;
}
- capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa);
+ for (i = 0; i < 32; i++) {
+ if (api_flags & BIT(i))
+ __set_bit(i + 32 * api_index, capa->_capa);
+ }
return 0;
}
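
A sketch of the widened API/capability handling above, assuming a standalone harness: each 32-bit flags word from the firmware TLV is scattered into a flat bitmap at bit offset api_index * 32, so fw_has_api() can test a single flat bit number across words. The constants below are hypothetical examples.

#include <stdio.h>
#include <stdint.h>

static void set_api_word(uint64_t *bitmap, unsigned api_index, uint32_t flags)
{
    for (unsigned i = 0; i < 32; i++)
        if (flags & (1u << i))
            *bitmap |= (uint64_t)1 << (i + 32 * api_index);
}

int main(void)
{
    uint64_t api = 0;   /* IWL_API_MAX_BITS = 64 fits one word here */

    set_api_word(&api, 0, 1u << 20);  /* e.g. NEW_VERSION: flat bit 20 */
    set_api_word(&api, 1, 1u << 3);   /* second TLV word -> flat bit 35 */
    printf("bit20=%d bit35=%d\n",
           (int)((api >> 20) & 1), (int)((api >> 35) & 1));
    return 0;   /* bit20=1 bit35=1 */
}
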
@@ -1148,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (err)
goto try_again;
- if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)
+ if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
api_ver = drv->fw.ucode_ver;
else
api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
@@ -1239,6 +1251,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
sizeof(struct iwl_fw_dbg_trigger_txq_timer);
trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
sizeof(struct iwl_fw_dbg_trigger_time_event);
+ trigger_tlv_sz[FW_DBG_TRIGGER_BA] =
+ sizeof(struct iwl_fw_dbg_trigger_ba);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
if (pieces->dbg_trigger_tlv[i]) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d45dc021cda2..d56064861a9c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index 251bf8dc4a12..e57dbd0ef2e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* detection.
* @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
* events.
+ * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
@@ -267,6 +268,7 @@ enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_RSSI,
FW_DBG_TRIGGER_TXQ_TIMERS,
FW_DBG_TRIGGER_TIME_EVENT,
+ FW_DBG_TRIGGER_BA,
/* must be last */
FW_DBG_TRIGGER_MAX,
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 62db2e5e45eb..a9b5ae4ebec0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -237,6 +237,8 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
+typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
+
/**
* enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
@@ -255,22 +257,27 @@ enum iwl_ucode_tlv_flag {
* @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
* @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
* @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
+ * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
+ * instead of 3.
*/
enum iwl_ucode_tlv_api {
- IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3),
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
- IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9),
- IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10),
- IWL_UCODE_TLV_API_TX_POWER_DEV = BIT(11),
- IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
- IWL_UCODE_TLV_API_SCD_CFG = BIT(15),
- IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
- IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17),
- IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18),
- IWL_UCODE_TLV_API_STATS_V10 = BIT(19),
- IWL_UCODE_TLV_API_NEW_VERSION = BIT(20),
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3,
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
+ IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
+ IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10,
+ IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11,
+ IWL_UCODE_TLV_API_BASIC_DWELL = (__force iwl_ucode_tlv_api_t)13,
+ IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15,
+ IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16,
+ IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17,
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
+ IWL_UCODE_TLV_API_STATS_V10 = (__force iwl_ucode_tlv_api_t)19,
+ IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
+ IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
};
+typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
+
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
* @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
@@ -290,6 +297,7 @@ enum iwl_ucode_tlv_api {
* which also implies support for the scheduler configuration command
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
* @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
* @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -299,22 +307,23 @@ enum iwl_ucode_tlv_api {
* @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
*/
enum iwl_ucode_tlv_capa {
- IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1),
- IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2),
- IWL_UCODE_TLV_CAPA_BEAMFORMER = BIT(3),
- IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6),
- IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8),
- IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9),
- IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
- IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
- IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
- IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13),
- IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22),
- IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28),
- IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = BIT(29),
- IWL_UCODE_TLV_CAPA_BT_COEX_RRC = BIT(30),
+ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1,
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2,
+ IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3,
+ IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6,
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8,
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10,
+ IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11,
+ IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12,
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
+ IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
+ IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
+ IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
+ IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
+ IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
};
/* The default calibrate table size if not specified by firmware file */
@@ -325,13 +334,14 @@ enum iwl_ucode_tlv_capa {
/* The default max probe length if not specified by the firmware file */
#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
+#define IWL_API_MAX_BITS 64
+#define IWL_CAPABILITIES_MAX_BITS 64
+
/*
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
#define IWL_UCODE_SECTION_MAX 12
-#define IWL_API_ARRAY_SIZE 1
-#define IWL_CAPABILITIES_ARRAY_SIZE 1
#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
/* uCode version contains 4 values: Major/Minor/API/Serial */
@@ -424,11 +434,13 @@ struct iwl_fw_dbg_reg_op {
* @SMEM_MODE: monitor stores the data in SMEM
* @EXTERNAL_MODE: monitor stores the data in allocated DRAM
* @MARBH_MODE: monitor stores the data in MARBH buffer
+ * @MIPI_MODE: monitor outputs the data through the MIPI interface
*/
enum iwl_fw_dbg_monitor_mode {
SMEM_MODE = 0,
EXTERNAL_MODE = 1,
MARBH_MODE = 2,
+ MIPI_MODE = 3,
};
/**
@@ -436,6 +448,7 @@ enum iwl_fw_dbg_monitor_mode {
*
* @version: version of the TLV - currently 0
* @monitor_mode: %enum iwl_fw_dbg_monitor_mode
+ * @size_power: buffer size will be 2^(size_power + 11)
* @base_reg: addr of the base addr register (PRPH)
* @end_reg: addr of the end addr register (PRPH)
* @write_ptr_reg: the addr of the reg of the write pointer
@@ -449,7 +462,8 @@ enum iwl_fw_dbg_monitor_mode {
struct iwl_fw_dbg_dest_tlv {
u8 version;
u8 monitor_mode;
- u8 reserved[2];
+ u8 size_power;
+ u8 reserved;
__le32 base_reg;
__le32 end_reg;
__le32 write_ptr_reg;
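As a worked example of the new @size_power field above: a hypothetical
size_power of 8 gives a monitor buffer of 2^(8+11) = 2^19 bytes = 512 KiB,
and size_power = 0 gives the 2^11 = 2 KiB minimum (the specific values are
illustrative, not from the patch).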
@@ -659,6 +673,33 @@ struct iwl_fw_dbg_trigger_time_event {
} __packed;
/**
+ * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger
+ * @rx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ * when an Rx BlockAck session is started.
+ * @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ * when an Rx BlockAck session is stopped.
+ * @tx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ * when a Tx BlockAck session is started.
+ * @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ * when a Tx BlockAck session is stopped.
+ * @rx_bar: tid bitmap to configure on what tid the trigger should occur
+ * when a BAR is received (for a Tx BlockAck session).
+ * @tx_bar: tid bitmap to configure on what tid the trigger should occur
+ * when a BAR is sent (for an Rx BlockAck session).
+ * @frame_timeout: tid bitmap to configure on what tid the trigger should occur
+ * when a frame times out in the reordering buffer.
+ */
+struct iwl_fw_dbg_trigger_ba {
+ __le16 rx_ba_start;
+ __le16 rx_ba_stop;
+ __le16 tx_ba_start;
+ __le16 tx_ba_stop;
+ __le16 rx_bar;
+ __le16 tx_bar;
+ __le16 frame_timeout;
+} __packed;
+
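How a tid is tested against these little-endian bitmaps, as a fragment (it
mirrors the CHECK_BA_TRIGGER macro this same series adds in mac80211.c
further down; the surrounding mvm/trig/tid plumbing is assumed):

	/* fire the trigger only when the bit for this tid is set */
	if (le16_to_cpu(ba_trig->rx_ba_start) & BIT(tid))
		iwl_mvm_fw_dbg_collect_trig(mvm, trig,
					    "RX AGG START: tid %d\n", tid);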
+/**
* struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
* @id: conf id
* @usniffer: should the uSniffer image be used
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index cf75bafae51d..3e3c9d8b3c37 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -105,10 +105,24 @@ struct iwl_ucode_capabilities {
u32 n_scan_channels;
u32 standard_phy_calibration_size;
u32 flags;
- u32 api[IWL_API_ARRAY_SIZE];
- u32 capa[IWL_CAPABILITIES_ARRAY_SIZE];
+ unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)];
+ unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)];
};
+static inline bool
+fw_has_api(const struct iwl_ucode_capabilities *capabilities,
+ iwl_ucode_tlv_api_t api)
+{
+ return test_bit((__force long)api, capabilities->_api);
+}
+
+static inline bool
+fw_has_capa(const struct iwl_ucode_capabilities *capabilities,
+ iwl_ucode_tlv_capa_t capa)
+{
+ return test_bit((__force long)capa, capabilities->_capa);
+}
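Call sites then read as a plain predicate, and sparse catches any leftover
capa[0] & ... mask test as a type mismatch. For example (this exact pattern
appears in the mvm conversions below):

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		iwl_mvm_config_scan(mvm);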
+
/* one for each uCode image (inst/data, init/runtime/wowlan) */
struct fw_desc {
const void *data; /* vmalloc'ed data */
@@ -205,6 +219,8 @@ static inline const char *get_fw_dbg_mode_string(int mode)
return "EXTERNAL_DRAM";
case MARBH_MODE:
return "MARBH";
+ case MIPI_MODE:
+ return "MIPI";
default:
return "UNKNOWN";
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 8e604a3931ca..80fefe7d7b8c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -249,7 +249,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
*/
if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
(flags & IEEE80211_CHAN_NO_IR))
- flags |= IEEE80211_CHAN_GO_CONCURRENT;
+ flags |= IEEE80211_CHAN_IR_CONCURRENT;
return flags;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 88a57e6e232f..5af1c776d2d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -348,6 +348,9 @@ enum secure_load_status_reg {
#define MON_BUFF_WRPTR (0xa03c44)
#define MON_BUFF_CYCLE_CNT (0xa03c48)
+#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
+#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
+
#define DBGC_IN_SAMPLE (0xa03c00)
/* enable the ID buf for read */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
new file mode 100644
index 000000000000..9f8bcefc04c5
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include "iwl-trans.h"
+
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+ struct device *dev,
+ const struct iwl_cfg *cfg,
+ const struct iwl_trans_ops *ops,
+ size_t dev_cmd_headroom)
+{
+ struct iwl_trans *trans;
+#ifdef CONFIG_LOCKDEP
+ static struct lock_class_key __key;
+#endif
+
+ trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
+ if (!trans)
+ return NULL;
+
+#ifdef CONFIG_LOCKDEP
+ lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+ &__key, 0);
+#endif
+
+ trans->dev = dev;
+ trans->cfg = cfg;
+ trans->ops = ops;
+ trans->dev_cmd_headroom = dev_cmd_headroom;
+
+ snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
+ "iwl_cmd_pool:%s", dev_name(trans->dev));
+ trans->dev_cmd_pool =
+ kmem_cache_create(trans->dev_cmd_pool_name,
+ sizeof(struct iwl_device_cmd)
+ + trans->dev_cmd_headroom,
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!trans->dev_cmd_pool)
+ goto free;
+
+ return trans;
+ free:
+ kfree(trans);
+ return NULL;
+}
+
+void iwl_trans_free(struct iwl_trans *trans)
+{
+ kmem_cache_destroy(trans->dev_cmd_pool);
+ kfree(trans);
+}
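A sketch of how a transport backend would use the shared helpers (the PCIe
transport is the real consumer; pdev, cfg and the iwl_trans_pcie /
trans_ops_pcie names here are placeholders for illustration):

	struct iwl_trans *trans;

	/* allocate the common part plus backend-private area in one go */
	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
				cfg, &trans_ops_pcie, 0);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	/* ... backend-specific setup using trans->trans_specific ... */

	/* and on teardown: */
	iwl_trans_free(trans);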
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 56254a837214..87a230a7f4b6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -641,6 +641,8 @@ struct iwl_trans {
enum iwl_d0i3_mode d0i3_mode;
+ bool wowlan_d0i3;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -1011,19 +1013,19 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
}
/*****************************************************
+ * transport helper functions
+ *****************************************************/
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+ struct device *dev,
+ const struct iwl_cfg *cfg,
+ const struct iwl_trans_ops *ops,
+ size_t dev_cmd_headroom);
+void iwl_trans_free(struct iwl_trans *trans);
+
+/*****************************************************
* driver (transport) register/unregister functions
******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
-static inline void trans_lockdep_init(struct iwl_trans *trans)
-{
-#ifdef CONFIG_LOCKDEP
- static struct lock_class_key __key;
-
- lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
- &__key, 0);
-#endif
-}
-
#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 13a0a03158de..b4737e296c92 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -408,23 +408,12 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
{
- struct iwl_bt_coex_cmd *bt_cmd;
- struct iwl_host_cmd cmd = {
- .id = BT_CONFIG,
- .len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- int ret;
+ struct iwl_bt_coex_cmd bt_cmd = {};
u32 mode;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_send_bt_init_conf_old(mvm);
- bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
- if (!bt_cmd)
- return -ENOMEM;
- cmd.data[0] = bt_cmd;
-
lockdep_assert_held(&mvm->mutex);
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -440,36 +429,33 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
mode = 0;
}
- bt_cmd->mode = cpu_to_le32(mode);
+ bt_cmd.mode = cpu_to_le32(mode);
goto send_cmd;
}
mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
- bt_cmd->mode = cpu_to_le32(mode);
+ bt_cmd.mode = cpu_to_le32(mode);
if (IWL_MVM_BT_COEX_SYNC2SCO)
- bt_cmd->enabled_modules |=
+ bt_cmd.enabled_modules |=
cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
if (iwl_mvm_bt_is_plcr_supported(mvm))
- bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
+ bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
if (IWL_MVM_BT_COEX_MPLUT) {
- bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
- bt_cmd->enabled_modules |=
+ bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
+ bt_cmd.enabled_modules |=
cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED);
}
- bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
+ bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
send_cmd:
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
- ret = iwl_mvm_send_cmd(mvm, &cmd);
-
- kfree(bt_cmd);
- return ret;
+ return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
}
static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
@@ -746,7 +732,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
@@ -770,52 +756,14 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
return 0;
}
-static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_bt_iterator_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
-
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
-
- struct ieee80211_chanctx_conf *chanctx_conf;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- /* If channel context is invalid or not on 2.4GHz - don't count it */
- if (!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
-
- if (vif->type != NL80211_IFTYPE_STATION ||
- mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
- return;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
- lockdep_is_held(&mvm->mutex));
-
- /* This can happen if the station has been removed right now */
- if (IS_ERR_OR_NULL(sta))
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-}
-
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data rssi_event)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_bt_iterator_data data = {
- .mvm = mvm,
- };
int ret;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
return;
}
@@ -853,10 +801,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
-
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bt_rssi_iterator, &data);
}
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
@@ -870,7 +814,7 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -897,7 +841,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -927,7 +871,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
if (ant & mvm->cfg->non_shared_ant)
return true;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
@@ -940,10 +884,10 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
if (mvm->cfg->bt_shared_single_ant)
return true;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
- return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF;
+ return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
}
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
@@ -951,7 +895,7 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
{
u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
if (band != IEEE80211_BAND_2GHZ)
@@ -994,7 +938,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
iwl_mvm_bt_coex_vif_change_old(mvm);
return;
}
@@ -1012,7 +957,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
if (!iwl_mvm_bt_is_plcr_supported(mvm))
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 4310cf102d78..4165d104e4c3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -761,7 +761,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
- iwl_mvm_cancel_scan(mvm);
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
iwl_trans_stop_device(mvm->trans);
@@ -981,7 +981,8 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
if (ret)
return ret;
- ret = iwl_mvm_scan_offload_start(mvm, vif, nd_config, &mvm->nd_ies);
+ ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
+ IWL_MVM_SCAN_NETDETECT);
if (ret)
return ret;
@@ -1169,7 +1170,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
iwl_trans_suspend(mvm->trans);
- if (wowlan->any) {
+ mvm->trans->wowlan_d0i3 = wowlan->any;
+ if (mvm->trans->wowlan_d0i3) {
/* 'any' trigger means d0i3 usage */
if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
int ret = iwl_mvm_enter_d0i3_sync(mvm);
@@ -1784,7 +1786,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
struct iwl_scan_offload_profile_match *fw_match;
struct cfg80211_wowlan_nd_match *match;
- int n_channels = 0;
+ int idx, n_channels = 0;
fw_match = &query.matches[i];
@@ -1799,8 +1801,12 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
net_detect->matches[net_detect->n_matches++] = match;
- match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len;
- memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid,
+ /* We inverted the order of the SSIDs in the scan
+ * request, so invert the index here.
+ */
+ idx = mvm->n_nd_match_sets - i - 1;
+ match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
+ memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
match->ssid.ssid_len);
if (mvm->n_nd_channels < n_channels)
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 5f37eab5008d..5c8a65de0e77 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -190,6 +190,21 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
return ret ?: count;
}
+static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ char buf[64];
+ int bufsz = sizeof(buf);
+ int pos;
+
+ pos = scnprintf(buf, bufsz, "bss limit = %d\n",
+ vif->bss_conf.txpower);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -607,6 +622,7 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
} while (0)
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
@@ -641,6 +657,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 9ac04c1ea706..ffb4b5cef275 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -493,7 +493,8 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
struct iwl_bt_coex_profile_notif_old *notif =
&mvm->last_bt_notif_old;
@@ -550,7 +551,8 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
pos += scnprintf(buf+pos, bufsz-pos,
@@ -916,7 +918,8 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
if (mvm->scan_rx_ant != scan_rx_ant) {
mvm->scan_rx_ant = scan_rx_ant;
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
}
@@ -1356,6 +1359,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
PRINT_MVM_REF(IWL_MVM_REF_SCAN);
PRINT_MVM_REF(IWL_MVM_REF_ROC);
+ PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
PRINT_MVM_REF(IWL_MVM_REF_USER);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index d6cced47d561..5e4cbdb44c60 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -274,50 +274,18 @@ struct iwl_scan_offload_profile_cfg {
} __packed;
/**
- * iwl_scan_offload_schedule - schedule of scan offload
+ * iwl_scan_schedule_lmac - schedule of scan offload
* @delay: delay between iterations, in seconds.
* @iterations: num of scan iterations
* @full_scan_mul: number of partial scans before each full scan
*/
-struct iwl_scan_offload_schedule {
+struct iwl_scan_schedule_lmac {
__le16 delay;
u8 iterations;
u8 full_scan_mul;
-} __packed;
-
-/*
- * iwl_scan_offload_flags
- *
- * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
- * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
- * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
- * beacon period. Finding channel activity in this mode is not guaranteed.
- * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
- * Assuming beacon period is 100ms finding channel activity is guaranteed.
- */
-enum iwl_scan_offload_flags {
- IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0),
- IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2),
- IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE = BIT(5),
- IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
-};
-
-/**
- * iwl_scan_offload_req - scan offload request command
- * @flags: bitmap - enum iwl_scan_offload_flags.
- * @watchdog: maximum scan duration in TU.
- * @delay: delay in seconds before first iteration.
- * @schedule_line: scan offload schedule, for fast and regular scan.
- */
-struct iwl_scan_offload_req {
- __le16 flags;
- __le16 watchdog;
- __le16 delay;
- __le16 reserved;
- struct iwl_scan_offload_schedule schedule_line[2];
-} __packed;
+} __packed; /* SCAN_SCHEDULE_API_S */
-enum iwl_scan_offload_compleate_status {
+enum iwl_scan_offload_complete_status {
IWL_SCAN_OFFLOAD_COMPLETED = 1,
IWL_SCAN_OFFLOAD_ABORTED = 2,
};
@@ -326,6 +294,7 @@ enum iwl_scan_ebs_status {
IWL_SCAN_EBS_SUCCESS,
IWL_SCAN_EBS_FAILED,
IWL_SCAN_EBS_CHAN_NOT_FOUND,
+ IWL_SCAN_EBS_INACTIVE,
};
/**
@@ -463,8 +432,19 @@ enum iwl_scan_priority {
IWL_SCAN_PRIORITY_HIGH,
};
+enum iwl_scan_priority_ext {
+ IWL_SCAN_PRIORITY_EXT_0_LOWEST,
+ IWL_SCAN_PRIORITY_EXT_1,
+ IWL_SCAN_PRIORITY_EXT_2,
+ IWL_SCAN_PRIORITY_EXT_3,
+ IWL_SCAN_PRIORITY_EXT_4,
+ IWL_SCAN_PRIORITY_EXT_5,
+ IWL_SCAN_PRIORITY_EXT_6,
+ IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
+};
+
/**
- * iwl_scan_req_unified_lmac - SCAN_REQUEST_CMD_API_S_VER_1
+ * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
* @reserved1: for alignment and future use
* @channel_num: num of channels to scan
* @active_dwell: dwell time for active channels
@@ -487,7 +467,7 @@ enum iwl_scan_priority {
* @channel_opt: channel optimization options, for full and partial scan
* @data: channel configuration and probe request packet.
*/
-struct iwl_scan_req_unified_lmac {
+struct iwl_scan_req_lmac {
/* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
__le32 reserved1;
u8 n_channels;
@@ -508,7 +488,7 @@ struct iwl_scan_req_unified_lmac {
/* SCAN_REQ_PERIODIC_PARAMS_API_S */
__le32 iter_num;
__le32 delay;
- struct iwl_scan_offload_schedule schedule[2];
+ struct iwl_scan_schedule_lmac schedule[2];
struct iwl_scan_channel_opt channel_opt[2];
u8 data[];
} __packed;
@@ -582,7 +562,11 @@ struct iwl_mvm_umac_cmd_hdr {
u8 ver;
} __packed;
-#define IWL_MVM_MAX_SIMULTANEOUS_SCANS 8
+/* Neither of these values can exceed 8, because we use an
+ * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
+ */
+#define IWL_MVM_MAX_UMAC_SCANS 8
+#define IWL_MVM_MAX_LMAC_SCANS 1
enum scan_config_flags {
SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
@@ -865,4 +849,27 @@ struct iwl_scan_offload_profiles_query {
struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+/**
+ * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @scanned_channels: number of channels scanned and number of valid elements in
+ * results array
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwl_umac_scan_iter_complete_notif {
+ __le32 uid;
+ u8 scanned_channels;
+ u8 status;
+ u8 bt_status;
+ u8 last_channel;
+ __le32 tsf_low;
+ __le32 tsf_high;
+ struct iwl_scan_results_notif results[];
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
#endif
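A sketch of a handler consuming this notification by walking the
variable-length results array (only the struct above comes from the patch;
the handler shape and debug message are assumptions):

	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
	int i;

	for (i = 0; i < notif->scanned_channels; i++)
		IWL_DEBUG_SCAN(mvm, "iteration done on channel %d\n",
			       notif->results[i].channel);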
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 01b1da6ad359..16e9ef49397f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,6 +108,7 @@ enum {
ANTENNA_COUPLING_NOTIFICATION = 0xa,
/* UMAC scan commands */
+ SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
SCAN_CFG_CMD = 0xc,
SCAN_REQ_UMAC = 0xd,
SCAN_ABORT_UMAC = 0xe,
@@ -147,13 +148,6 @@ enum {
LQ_CMD = 0x4e,
- /* Calibration */
- TEMPERATURE_NOTIFICATION = 0x62,
- CALIBRATION_CFG_CMD = 0x65,
- CALIBRATION_RES_NOTIFICATION = 0x66,
- CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
- RADIO_VERSION_NOTIFICATION = 0x68,
-
/* Scan offload */
SCAN_OFFLOAD_REQUEST_CMD = 0x51,
SCAN_OFFLOAD_ABORT_CMD = 0x52,
@@ -177,12 +171,8 @@ enum {
/* Thermal Throttling*/
REPLY_THERMAL_MNG_BACKOFF = 0x7e,
- /* Scanning */
- SCAN_REQUEST_CMD = 0x80,
- SCAN_ABORT_CMD = 0x81,
- SCAN_START_NOTIFICATION = 0x82,
- SCAN_RESULTS_NOTIFICATION = 0x83,
- SCAN_COMPLETE_NOTIFICATION = 0x84,
+ /* Set/Get DC2DC frequency tune */
+ DC2DC_CONFIG_CMD = 0x83,
/* NVM */
NVM_ACCESS_CMD = 0x88,
@@ -1402,6 +1392,49 @@ struct iwl_mvm_marker {
__le32 metadata[0];
} __packed; /* MARKER_API_S_VER_1 */
+/*
+ * enum iwl_dc2dc_config_id - flag ids
+ *
+ * Ids of dc2dc configuration flags
+ */
+enum iwl_dc2dc_config_id {
+ DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */
+ DCDC_FREQ_TUNE_SET = 0x2,
+}; /* MARKER_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_cmd - configure dc2dc values
+ *
+ * (DC2DC_CONFIG_CMD = 0x83)
+ *
+ * Set/Get & configure dc2dc values.
+ * The command always returns the current dc2dc values.
+ *
+ * @flags: set/get dc2dc
+ * @enable_low_power_mode: not used.
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_cmd {
+ __le32 flags;
+ __le32 enable_low_power_mode; /* not used */
+ __le32 dc2dc_freq_tune0;
+ __le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
+ *
+ * Current dc2dc values returned by the FW.
+ *
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_resp {
+ __le32 dc2dc_freq_tune0;
+ __le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
+
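A hedged sketch of exercising the new command -- querying the current
dividers by sending it with no set flags (response parsing via CMD_WANT_SKB
is omitted; only the structs and DCDC_FREQ_TUNE_SET above come from the
patch):

	struct iwl_dc2dc_config_cmd cmd = {
		.flags = 0,	/* no set flags: just read back current values */
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, DC2DC_CONFIG_CMD, 0,
				   sizeof(cmd), &cmd);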
/***********************************
* Smart Fifo API
***********************************/
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index df869633f4dd..eb10c5ee4a14 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -623,7 +623,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
if (!mvm->trans->ltr_enabled)
return 0;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_HDC_PHASE_0))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_HDC_PHASE_0))
return iwl_mvm_config_ltr_v1(mvm);
return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
@@ -662,9 +662,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
* device that are triggered by the INIT firmware (MFUART).
*/
_iwl_trans_stop_device(mvm->trans, false);
- _iwl_trans_start_hw(mvm->trans, false);
+ ret = _iwl_trans_start_hw(mvm->trans, false);
if (ret)
- return ret;
+ goto error;
}
if (iwlmvm_mod_params.init_dbg)
@@ -754,7 +754,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
ret = iwl_mvm_config_scan(mvm);
if (ret)
goto error;
@@ -832,21 +832,6 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
return 0;
}
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_radio_version_notif *radio_version = (void *)pkt->data;
-
- /* TODO: what to do with that? */
- IWL_DEBUG_INFO(mvm,
- "Radio version: flavor: 0x%08x, step 0x%08x, dash 0x%08x\n",
- le32_to_cpu(radio_version->radio_flavor),
- le32_to_cpu(radio_version->radio_step),
- le32_to_cpu(radio_version->radio_dash));
- return 0;
-}
-
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 8088c7137f7c..1812dd018af2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -852,7 +852,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
MAC_FILTER_IN_BEACON |
MAC_FILTER_IN_PROBE_REQUEST |
MAC_FILTER_IN_CRC32);
- mvm->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+ ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
@@ -1270,7 +1270,7 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->uploaded = false;
if (vif->type == NL80211_IFTYPE_MONITOR)
- mvm->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index dda9f7b5f342..dfdab38e2d4a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -80,7 +80,6 @@
#include "sta.h"
#include "time-event.h"
#include "iwl-eeprom-parse.h"
-#include "fw-api-scan.h"
#include "iwl-phy-db.h"
#include "testmode.h"
#include "iwl-fw-error-dump.h"
@@ -319,7 +318,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
if (IS_ERR_OR_NULL(resp)) {
IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
- PTR_RET(resp));
+ PTR_ERR_OR_ZERO(resp));
goto out;
}
@@ -335,7 +334,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
kfree(resp);
if (IS_ERR_OR_NULL(regd)) {
IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
- PTR_RET(regd));
+ PTR_ERR_OR_ZERO(regd));
goto out;
}
@@ -416,20 +415,27 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
int num_mac, ret, i;
+ static const u32 mvm_ciphers[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ };
/* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_TIMING_BEACON_ONLY |
- IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_CHANCTX_STA_CSA |
- IEEE80211_HW_SUPPORTS_CLONED_SKBS;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, QUEUE_CONTROL);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
hw->queues = mvm->first_agg_queue;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@@ -441,19 +447,38 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
+ memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
+ hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
+ hw->wiphy->cipher_suites = mvm->ciphers;
+
/*
* Enable 11w if advertised by firmware and software crypto
* is not enabled (as the firmware will interpret some mgmt
* packets, so enabling it with software crypto isn't safe)
*/
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
- !iwlwifi_mod_params.sw_crypto)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ !iwlwifi_mod_params.sw_crypto) {
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_AES_CMAC;
+ hw->wiphy->n_cipher_suites++;
+ }
- hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
+ /* currently FW API supports only one optional cipher scheme */
+ if (mvm->fw->cs[0].cipher) {
+ mvm->hw->n_cipher_schemes = 1;
+ mvm->hw->cipher_schemes = &mvm->fw->cs[0];
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ mvm->fw->cs[0].cipher;
+ hw->wiphy->n_cipher_suites++;
+ }
+
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
- NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -506,10 +531,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
- hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm, false);
+ hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
+ BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
+ IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
+ else
+ mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
+
if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
@@ -517,10 +551,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
- if ((mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
- (mvm->fw->ucode_capa.api[0] &
- IWL_UCODE_TLV_API_LQ_SS_PARAMS))
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+ fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS))
hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
}
@@ -532,14 +566,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) {
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
- hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
- hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
- /* we create the 802.11 header and zero length SSID IE. */
- hw->wiphy->max_sched_scan_ie_len =
- SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
- }
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ /* we create the 802.11 header and zero length SSID IE. */
+ hw->wiphy->max_sched_scan_ie_len =
+ SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -548,30 +580,24 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
NL80211_FEATURE_STATIC_SMPS |
NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_QUIET;
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
hw->wiphy->features |=
NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
- /* currently FW API supports only one optional cipher scheme */
- if (mvm->fw->cs[0].cipher) {
- mvm->hw->n_cipher_schemes = 1;
- mvm->hw->cipher_schemes = &mvm->fw->cs[0];
- }
-
#ifdef CONFIG_PM_SLEEP
if (iwl_mvm_is_d0i3_supported(mvm) &&
device_can_wakeup(mvm->trans->dev)) {
@@ -611,13 +637,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (ret)
return ret;
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) {
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
}
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) {
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
}
@@ -730,6 +757,60 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
return true;
}
+#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
+ do { \
+ if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
+ break; \
+ iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
+ } while (0)
+
+static void
+iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
+ enum ieee80211_ampdu_mlme_action action)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+ return;
+
+ switch (action) {
+ case IEEE80211_AMPDU_TX_OPERATIONAL: {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
+ "TX AGG START: MAC %pM tid %d ssn %d\n",
+ sta->addr, tid, tid_data->ssn);
+ break;
+ }
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
+ "TX AGG STOP: MAC %pM tid %d\n",
+ sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
+ "RX AGG START: MAC %pM tid %d ssn %d\n",
+ sta->addr, tid, rx_ba_ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
+ "RX AGG STOP: MAC %pM tid %d\n",
+ sta->addr, tid);
+ break;
+ default:
+ break;
+ }
+}
+
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@@ -806,6 +887,16 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = -EINVAL;
break;
}
+
+ if (!ret) {
+ u16 rx_ba_ssn = 0;
+
+ if (action == IEEE80211_AMPDU_RX_START)
+ rx_ba_ssn = *ssn;
+
+ iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
+ rx_ba_ssn, action);
+ }
mutex_unlock(&mvm->mutex);
/*
@@ -1227,22 +1318,23 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_trans_stop_device(mvm->trans);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ mvm->scan_status = 0;
mvm->ps_disabled = false;
mvm->calibrating = false;
/* just in case one was running */
ieee80211_remain_on_channel_expired(mvm->hw);
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
- iwl_mvm_cleanup_iterator, mvm);
+ /*
+ * clean up all interfaces, even inactive ones, as some might have
+ * gone down during the HW restart
+ */
+ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
mvm->p2p_device_vif = NULL;
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
iwl_mvm_reset_phy_ctxts(mvm);
- memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
@@ -1404,7 +1496,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
* The work item could be running or queued if the
* ROC time event stops just as we get here.
*/
- cancel_work_sync(&mvm->roc_done_wk);
+ flush_work(&mvm->roc_done_wk);
iwl_trans_stop_device(mvm->trans);
@@ -1417,20 +1509,24 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
+ * But make sure to clean up interfaces that have gone down before or
+ * during the HW restart.
*/
- clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+ if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ ieee80211_iterate_interfaces(mvm->hw, 0,
+ iwl_mvm_cleanup_iterator, mvm);
/* We shouldn't have any UIDs still set. Loop over all the UIDs to
* make sure there's nothing left there and warn if any is found.
*/
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
int i;
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
- if (WARN_ONCE(mvm->scan_uid[i],
- "UMAC scan UID %d was not cleaned\n",
- mvm->scan_uid[i]))
- mvm->scan_uid[i] = 0;
+ for (i = 0; i < mvm->max_scans; i++) {
+ if (WARN_ONCE(mvm->scan_uid_status[i],
+ "UMAC scan UID %d status was not cleaned\n",
+ i))
+ mvm->scan_uid_status[i] = 0;
}
}
@@ -1495,7 +1591,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
.pwr_restriction = cpu_to_le16(8 * tx_power),
};
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
@@ -2354,7 +2450,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
- iwl_mvm_scan_offload_stop(mvm, true);
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -2373,89 +2469,21 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
}
-static int iwl_mvm_cancel_scan_wait_notif(struct iwl_mvm *mvm,
- enum iwl_scan_status scan_type)
-{
- int ret;
- bool wait_for_handlers = false;
-
- mutex_lock(&mvm->mutex);
-
- if (mvm->scan_status != scan_type) {
- ret = 0;
- /* make sure there are no pending notifications */
- wait_for_handlers = true;
- goto out;
- }
-
- switch (scan_type) {
- case IWL_MVM_SCAN_SCHED:
- ret = iwl_mvm_scan_offload_stop(mvm, true);
- break;
- case IWL_MVM_SCAN_OS:
- ret = iwl_mvm_cancel_scan(mvm);
- break;
- case IWL_MVM_SCAN_NONE:
- default:
- WARN_ON_ONCE(1);
- ret = -EINVAL;
- break;
- }
- if (ret)
- goto out;
-
- wait_for_handlers = true;
-out:
- mutex_unlock(&mvm->mutex);
-
- /* make sure we consume the completion notification */
- if (wait_for_handlers)
- iwl_mvm_wait_for_async_handlers(mvm);
-
- return ret;
-}
static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct cfg80211_scan_request *req = &hw_req->req;
int ret;
- if (req->n_channels == 0 ||
- req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
+ if (hw_req->req.n_channels == 0 ||
+ hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
return -EINVAL;
- if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
- if (ret)
- return ret;
- }
-
mutex_lock(&mvm->mutex);
-
- if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
- IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
- ret = -EBUSY;
- goto out;
- }
-
- if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
- ret = -EBUSY;
- goto out;
- }
-
- iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
-
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
- ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
- else
- ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
-
- if (ret)
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-out:
+ ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
mutex_unlock(&mvm->mutex);
+
return ret;
}
@@ -2473,12 +2501,8 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
* cancel scan before ieee80211_scan_work() could run.
* To handle that, simply return if the scan is not running.
*/
- /* FIXME: for now, we ignore this race for UMAC scans, since
- * they don't set the scan_status.
- */
- if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
- (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
- iwl_mvm_cancel_scan(mvm);
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
mutex_unlock(&mvm->mutex);
}
@@ -2794,35 +2818,17 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
struct ieee80211_scan_ies *ies)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
- ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
- if (ret)
- return ret;
- }
+ int ret;
mutex_lock(&mvm->mutex);
- if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
- IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
- ret = -EBUSY;
- goto out;
- }
-
if (!vif->bss_conf.idle) {
ret = -EBUSY;
goto out;
}
- if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
- ret = -EBUSY;
- goto out;
- }
-
- ret = iwl_mvm_scan_offload_start(mvm, vif, req, ies);
- if (ret)
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
out:
mutex_unlock(&mvm->mutex);
@@ -2845,16 +2851,12 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
* could run. To handle this, simply return if the scan is
* not running.
*/
- /* FIXME: for now, we ignore this race for UMAC scans, since
- * they don't set the scan_status.
- */
- if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
- !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
mutex_unlock(&mvm->mutex);
return 0;
}
- ret = iwl_mvm_scan_offload_stop(mvm, false);
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
mutex_unlock(&mvm->mutex);
iwl_mvm_wait_for_async_handlers(mvm);
@@ -2883,7 +2885,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
- WARN_ON_ONCE(!(hw->flags & IEEE80211_HW_MFP_CAPABLE));
+ WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -2922,8 +2924,21 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
}
+ /* During FW restart, in order to restore the state as it was,
+ * don't try to reprogram keys that previously failed to install.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ key->hw_key_idx == STA_KEY_IDX_INVALID) {
+ IWL_DEBUG_MAC80211(mvm,
+ "skip invalid idx key programming during restart\n");
+ ret = 0;
+ break;
+ }
+
IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
- ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false);
+ ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
+ test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status));
if (ret) {
IWL_WARN(mvm, "set key failed\n");
/*
@@ -3001,7 +3016,7 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
return true;
}
-#define AUX_ROC_MAX_DELAY_ON_CHANNEL 5000
+#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
@@ -3106,8 +3121,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- if (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
/* Use aux roc framework (HS20) */
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
vif, duration);
@@ -3899,7 +3914,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
if (idx != 0)
return -ENOENT;
- if (!(mvm->fw->ucode_capa.capa[0] &
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
@@ -3946,8 +3961,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (!(mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */
@@ -3977,9 +3992,9 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
-static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const struct ieee80211_event *event)
+static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
{
#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
do { \
@@ -3988,7 +4003,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
} while (0)
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_mlme *trig_mlme;
@@ -4032,6 +4046,75 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
#undef CHECK_MLME_TRIGGER
}
+static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+ return;
+
+ if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
+ return;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+ "BAR received from %pM, tid %d, ssn %d",
+ event->u.ba.sta->addr, event->u.ba.tid,
+ event->u.ba.ssn);
+}
+
+static void
+iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+ return;
+
+ if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
+ return;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+ "Frame from %pM timed out, tid %d",
+ event->u.ba.sta->addr, event->u.ba.tid);
+}
+
+static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_event *event)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ switch (event->type) {
+ case MLME_EVENT:
+ iwl_mvm_event_mlme_callback(mvm, vif, event);
+ break;
+ case BAR_RX_EVENT:
+ iwl_mvm_event_bar_rx_callback(mvm, vif, event);
+ break;
+ case BA_FRAME_TIMEOUT:
+ iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
+ break;
+ default:
+ break;
+ }
+}
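The two BA handlers above differ only in which trigger mask they test (rx_bar vs. frame_timeout) and in the log text; the enable/get/check-stop preamble is identical. Purely as a sketch, a hypothetical helper (it does not exist in the driver) could hoist the shared part:

    static struct iwl_fw_dbg_trigger_tlv *
    iwl_mvm_ba_trig_get(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                        struct iwl_fw_dbg_trigger_ba **ba_trig)
    {
            struct iwl_fw_dbg_trigger_tlv *trig;

            /* same three checks both callbacks perform today */
            if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
                    return NULL;

            trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
            *ba_trig = (void *)trig->data;

            if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
                    return NULL;

            return trig;
    }

Each caller would then only test its own mask field and format its own collection string.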
+
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index cf70f681d1ac..2d4bad5fe825 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,6 +76,7 @@
#include "iwl-notif-wait.h"
#include "iwl-eeprom-parse.h"
#include "iwl-fw-file.h"
+#include "iwl-config.h"
#include "sta.h"
#include "fw-api.h"
#include "constants.h"
@@ -275,6 +276,7 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_UCODE_DOWN,
IWL_MVM_REF_SCAN,
IWL_MVM_REF_ROC,
+ IWL_MVM_REF_ROC_AUX,
IWL_MVM_REF_P2P_CLIENT,
IWL_MVM_REF_AP_IBSS,
IWL_MVM_REF_USER,
@@ -445,10 +447,26 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
extern const u8 tid_to_mac80211_ac[];
+#define IWL_MVM_SCAN_STOPPING_SHIFT 8
+
enum iwl_scan_status {
- IWL_MVM_SCAN_NONE,
- IWL_MVM_SCAN_OS,
- IWL_MVM_SCAN_SCHED,
+ IWL_MVM_SCAN_REGULAR = BIT(0),
+ IWL_MVM_SCAN_SCHED = BIT(1),
+ IWL_MVM_SCAN_NETDETECT = BIT(2),
+
+ IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
+ IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
+ IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10),
+
+ IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR |
+ IWL_MVM_SCAN_STOPPING_REGULAR,
+ IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED |
+ IWL_MVM_SCAN_STOPPING_SCHED,
+ IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
+ IWL_MVM_SCAN_STOPPING_NETDETECT,
+
+ IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
+ IWL_MVM_SCAN_MASK = 0xff,
};
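The layout is deliberate: a scan type's STOPPING bit is its running bit shifted left by IWL_MVM_SCAN_STOPPING_SHIFT, so each *_MASK pairs the two states of one type (REGULAR_MASK = 0x101, SCHED_MASK = 0x202, NETDETECT_MASK = 0x404), while SCAN_MASK (0xff) and STOPPING_MASK (0xff00) slice the whole bitmask by state. A standalone check of the arithmetic:

    #include <stdio.h>

    #define BIT(n) (1u << (n))
    #define STOPPING_SHIFT 8

    int main(void)
    {
            unsigned int sched = BIT(1);
            unsigned int stopping_sched = sched << STOPPING_SHIFT;

            /* prints 0x2 0x200 0x202 */
            printf("%#x %#x %#x\n", sched, stopping_sched,
                   sched | stopping_sched);
            return 0;
    }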
/**
@@ -463,49 +481,6 @@ struct iwl_nvm_section {
const u8 *data;
};
-/*
- * Tx-backoff threshold
- * @temperature: The threshold in Celsius
- * @backoff: The tx-backoff in uSec
- */
-struct iwl_tt_tx_backoff {
- s32 temperature;
- u32 backoff;
-};
-
-#define TT_TX_BACKOFF_SIZE 6
-
-/**
- * struct iwl_tt_params - thermal throttling parameters
- * @ct_kill_entry: CT Kill entry threshold
- * @ct_kill_exit: CT Kill exit threshold
- * @ct_kill_duration: The time intervals (in uSec) in which the driver needs
- * to checks whether to exit CT Kill.
- * @dynamic_smps_entry: Dynamic SMPS entry threshold
- * @dynamic_smps_exit: Dynamic SMPS exit threshold
- * @tx_protection_entry: TX protection entry threshold
- * @tx_protection_exit: TX protection exit threshold
- * @tx_backoff: Array of thresholds for tx-backoff , in ascending order.
- * @support_ct_kill: Support CT Kill?
- * @support_dynamic_smps: Support dynamic SMPS?
- * @support_tx_protection: Support tx protection?
- * @support_tx_backoff: Support tx-backoff?
- */
-struct iwl_tt_params {
- s32 ct_kill_entry;
- s32 ct_kill_exit;
- u32 ct_kill_duration;
- s32 dynamic_smps_entry;
- s32 dynamic_smps_exit;
- s32 tx_protection_entry;
- s32 tx_protection_exit;
- struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
- bool support_ct_kill;
- bool support_dynamic_smps;
- bool support_tx_protection;
- bool support_tx_backoff;
-};
-
/**
* struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
* @ct_kill_exit: worker to exit thermal kill
@@ -520,7 +495,7 @@ struct iwl_mvm_tt_mgmt {
bool dynamic_smps;
u32 tx_backoff;
u32 min_backoff;
- const struct iwl_tt_params *params;
+ struct iwl_tt_params params;
bool throttle;
};
@@ -647,13 +622,15 @@ struct iwl_mvm {
u32 rts_threshold;
/* Scan status, cmd (pre-allocated) and auxiliary station */
- enum iwl_scan_status scan_status;
+ unsigned int scan_status;
void *scan_cmd;
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
+ /* max number of simultaneous scans the FW supports */
+ unsigned int max_scans;
+
/* UMAC scan tracking */
- u32 scan_uid[IWL_MVM_MAX_SIMULTANEOUS_SCANS];
- u8 scan_seq_num, sched_scan_seq_num;
+ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
@@ -843,6 +820,8 @@ struct iwl_mvm {
} tdls_cs;
struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
+
+ u32 ciphers[6];
};
/* Extract MVM priv from op_mode and _hw */
@@ -912,14 +891,15 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
return mvm->trans->cfg->d0i3 &&
mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF &&
!iwlwifi_mod_params.d0i3_disable &&
- (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
{
bool nvm_lar = mvm->nvm_data->lar_enabled;
- bool tlv_lar = mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+ bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
if (iwlwifi_mod_params.lar_disable)
return false;
@@ -936,24 +916,28 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
{
- return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE ||
- mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC;
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
}
static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
{
- return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
+ return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCD_CFG);
}
static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
{
- return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
IWL_MVM_BT_COEX_CORUNNING;
}
static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
{
- return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
IWL_MVM_BT_COEX_RRC;
}
@@ -1083,8 +1067,6 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
@@ -1093,8 +1075,6 @@ int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
@@ -1146,48 +1126,38 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
struct ieee80211_vif *disabled_vif);
/* Scanning */
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies);
int iwl_mvm_scan_size(struct iwl_mvm *mvm);
-int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
-int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
/* Scheduled scan */
-int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
- struct cfg80211_sched_scan_request *req);
-int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies);
-int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify);
-int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd);
-
-/* Unified scan */
-int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_scan_request *req);
-int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies);
+int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type);
+int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
/* UMAC scan */
int iwl_mvm_config_scan(struct iwl_mvm *mvm);
-int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_scan_request *req);
-int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies);
int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 87b2a30a2308..2a6be350704a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -316,8 +316,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
lar_enabled = !iwlwifi_mod_params.lar_disable &&
- (mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
regulatory, mac_override, phy_sku,
@@ -583,9 +583,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
kfree(nvm_buffer);
}
- /* load external NVM if configured */
+ /* Load external NVM only if PNVM was selected via the mod param */
if (mvm->nvm_file_name) {
- /* read External NVM file - take the default */
+ /* read External NVM file from the mod param */
ret = iwl_mvm_read_external_nvm(mvm);
if (ret) {
/* choose the nvm_file name according to the
@@ -792,8 +792,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
char mcc[3];
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
- tlv_lar = mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+ tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
nvm_lar = mvm->nvm_data->lar_enabled;
if (tlv_lar != nvm_lar)
IWL_INFO(mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 2ea01238754e..e4fa50075ffd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -194,7 +194,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and not being able to obtain it back.
*/
- if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ if (!mvm->trans->cfg->apmg_not_supported)
iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
@@ -238,15 +238,16 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
RX_HANDLER(SCAN_ITERATION_COMPLETE,
- iwl_mvm_rx_scan_offload_iter_complete_notif, false),
+ iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
- iwl_mvm_rx_scan_offload_complete_notif, true),
- RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
+ iwl_mvm_rx_lmac_scan_complete_notif, true),
+ RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
false),
RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
true),
+ RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
+ iwl_mvm_rx_umac_scan_iter_complete_notif, false),
- RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
@@ -280,17 +281,11 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BINDING_CONTEXT_CMD),
CMD(TIME_QUOTA_CMD),
CMD(NON_QOS_TX_COUNTER_CMD),
- CMD(RADIO_VERSION_NOTIFICATION),
- CMD(SCAN_REQUEST_CMD),
- CMD(SCAN_ABORT_CMD),
- CMD(SCAN_START_NOTIFICATION),
- CMD(SCAN_RESULTS_NOTIFICATION),
- CMD(SCAN_COMPLETE_NOTIFICATION),
+ CMD(DC2DC_CONFIG_CMD),
CMD(NVM_ACCESS_CMD),
CMD(PHY_CONFIGURATION_CMD),
CMD(CALIB_RES_NOTIF_PHY_DB),
CMD(SET_CALIB_DEFAULT_CMD),
- CMD(CALIBRATION_COMPLETE_NOTIFICATION),
CMD(ADD_STA_KEY),
CMD(ADD_STA),
CMD(REMOVE_STA),
@@ -359,6 +354,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
CMD(TDLS_CONFIG_CMD),
CMD(MCC_UPDATE_CMD),
+ CMD(SCAN_ITERATION_COMPLETE_UMAC),
};
#undef CMD
@@ -520,15 +516,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
min_backoff = calc_min_backoff(trans, cfg);
iwl_mvm_tt_initialize(mvm, min_backoff);
- /* set the nvm_file_name according to priority */
- if (iwlwifi_mod_params.nvm_file) {
+
+ if (iwlwifi_mod_params.nvm_file)
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
- } else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
- if (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP)
- mvm->nvm_file_name = mvm->cfg->default_nvm_file_B_step;
- else
- mvm->nvm_file_name = mvm->cfg->default_nvm_file_C_step;
- }
+ else
+ IWL_DEBUG_EEPROM(mvm->trans->dev,
+ "working without external nvm file\n");
if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
"not allowing power-up and not having nvm_file\n"))
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 33cd68ae7bf9..daff1d0a8e4a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -138,7 +138,7 @@ struct rs_tx_column;
typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl,
+ struct rs_rate *rate,
const struct rs_tx_column *next_col);
struct rs_tx_column {
@@ -150,14 +150,14 @@ struct rs_tx_column {
};
static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl,
+ struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
}
static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl,
+ struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
struct iwl_mvm_sta *mvmsta;
@@ -187,7 +187,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl,
+ struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
if (!sta->ht_cap.ht_supported)
@@ -197,10 +197,9 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl,
+ struct rs_rate *rate,
const struct rs_tx_column *next_col)
{
- struct rs_rate *rate = &tbl->rate;
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
@@ -1128,8 +1127,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
- bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] &
- IWL_UCODE_TLV_API_LQ_SS_PARAMS;
+ bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_LQ_SS_PARAMS);
/* Treat uninitialized rate scaling data same as non-existing. */
if (!lq_sta) {
@@ -1659,7 +1658,8 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
allow_func = next_col->checks[j];
- if (allow_func && !allow_func(mvm, sta, tbl, next_col))
+ if (allow_func && !allow_func(mvm, sta, &tbl->rate,
+ next_col))
break;
}
@@ -2136,7 +2136,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
}
/* current tx rate */
- index = lq_sta->last_txrate_idx;
+ index = rate->index;
/* rates available for this association, and for modulation mode */
rate_mask = rs_get_supported_rates(lq_sta, rate);
@@ -2184,14 +2184,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
* or search for a new one? */
rs_stay_in_table(lq_sta, false);
- goto out;
- }
- /* Else we have enough samples; calculate estimate of
- * actual average throughput */
- if (window->average_tpt != ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128)) {
- window->average_tpt = ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128);
+ return;
}
/* If we are searching for better modulation mode, check success. */
@@ -2403,9 +2396,6 @@ lq_update:
rs_set_stay_in_table(mvm, 0, lq_sta);
}
}
-
-out:
- lq_sta->last_txrate_idx = index;
}
struct rs_init_rate_info {
@@ -2548,7 +2538,6 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
rate = &tbl->rate;
rs_get_initial_rate(mvm, lq_sta, band, rate);
- lq_sta->last_txrate_idx = rate->index;
WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
if (rate->ant == ANT_A)
@@ -2725,7 +2714,7 @@ static void rs_vht_init(struct iwl_mvm *mvm,
(vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
lq_sta->stbc_capable = true;
- if ((mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
(num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
(vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
lq_sta->bfer_capable = true;
@@ -3009,7 +2998,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
/* TODO: remove old API when min FW API hits 14 */
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
rs_stbc_allow(mvm, sta, lq_sta))
rate.stbc = true;
@@ -3223,12 +3212,9 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS)
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS))
rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
- if (num_of_ant(initial_rate->ant) == 1)
- lq_cmd->single_stream_ant_msk = initial_rate->ant;
-
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index e4aa9346a231..2a3da314305a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -322,8 +322,6 @@ struct iwl_lq_sta {
struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
u8 tx_agg_tid_en;
- /* used to be in sta_info */
- int last_txrate_idx;
/* last tx rate_n_flags */
u32 last_rate_n_flags;
/* packets destined for this STA are aggregated */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index d6314ddf57b5..8f1d93b7a13a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -570,7 +570,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
};
u32 temperature;
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) {
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STATS_V10)) {
struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
if (iwl_rx_packet_payload_len(pkt) != v10_len)
@@ -610,7 +610,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
/* Only handle rx statistics temperature changes if async temp
* notifications are not supported
*/
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ASYNC_DTM))
iwl_mvm_tt_temp_changed(mvm, temperature);
ieee80211_iterate_active_interfaces(mvm->hw,
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 1075a213bd6a..5de144968723 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -67,11 +67,8 @@
#include <net/mac80211.h>
#include "mvm.h"
-#include "iwl-eeprom-parse.h"
#include "fw-api-scan.h"
-#define IWL_PLCP_QUIET_THRESH 1
-#define IWL_ACTIVE_QUIET_TIME 10
#define IWL_DENSE_EBS_SCAN_RATIO 5
#define IWL_SPARSE_EBS_SCAN_RATIO 1
@@ -79,23 +76,31 @@ struct iwl_mvm_scan_params {
u32 max_out_time;
u32 suspend_time;
bool passive_fragmented;
+ u32 n_channels;
+ u16 delay;
+ int n_ssids;
+ struct cfg80211_ssid *ssids;
+ struct ieee80211_channel **channels;
+ u16 interval; /* interval between scans (in secs) */
+ u32 flags;
+ u8 *mac_addr;
+ u8 *mac_addr_mask;
+ bool no_cck;
+ bool pass_all;
+ int n_match_sets;
+ struct iwl_scan_probe_req preq;
+ struct cfg80211_match_set *match_sets;
struct _dwell {
u16 passive;
u16 active;
u16 fragmented;
} dwell[IEEE80211_NUM_BANDS];
+ struct {
+ u8 iterations;
+ u8 full_scan_mul; /* not used for UMAC */
+ } schedule[2];
};
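With this struct, one request descriptor now carries everything both backends need (channels, SSIDs, probe request, schedule), so regular and scheduled scans can share the LMAC/UMAC build paths. A rough sketch of how a one-shot request might populate it (hypothetical caller code; the real fill happens in the scan start functions, which this hunk does not show):

    struct iwl_mvm_scan_params params = {};

    params.n_ssids = req->n_ssids;          /* from the cfg80211 request */
    params.ssids = req->ssids;
    params.n_channels = req->n_channels;
    params.channels = req->channels;
    params.pass_all = true;                 /* report every result */
    params.schedule[0].iterations = 1;      /* one-shot scan */
    params.schedule[0].full_scan_mul = 0;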
-enum iwl_umac_scan_uid_type {
- IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0),
- IWL_UMAC_SCAN_UID_SCHED_SCAN = BIT(1),
- IWL_UMAC_SCAN_UID_ALL = IWL_UMAC_SCAN_UID_REG_SCAN |
- IWL_UMAC_SCAN_UID_SCHED_SCAN,
-};
-
-static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
- enum iwl_umac_scan_uid_type type, bool notify);
-
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
if (mvm->scan_rx_ant != ANT_NONE)
@@ -143,28 +148,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
}
/*
- * We insert the SSIDs in an inverted order, because the FW will
- * invert it back. The most prioritized SSID, which is first in the
- * request list, is not copied here, but inserted directly to the probe
- * request.
- */
-static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
- struct cfg80211_ssid *ssids,
- int n_ssids, int first)
-{
- int fw_idx, req_idx;
-
- for (req_idx = n_ssids - 1, fw_idx = 0; req_idx >= first;
- req_idx--, fw_idx++) {
- cmd_ssid[fw_idx].id = WLAN_EID_SSID;
- cmd_ssid[fw_idx].len = ssids[req_idx].ssid_len;
- memcpy(cmd_ssid[fw_idx].ssid,
- ssids[req_idx].ssid,
- ssids[req_idx].ssid_len);
- }
-}
-
-/*
* If req->n_ssids > 0, it means we should do an active scan.
* In case of active scan w/o directed scan, we receive a zero-length SSID
* just to notify that this scan is active and not passive.
@@ -177,7 +160,7 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
enum ieee80211_band band, int n_ssids)
{
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
return 10;
if (band == IEEE80211_BAND_2GHZ)
return 20 + 3 * (n_ssids + 1);
@@ -187,7 +170,7 @@ static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
enum ieee80211_band band)
{
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
return 110;
return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
}
@@ -203,10 +186,9 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
*global_cnt += 1;
}
-static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int n_ssids, u32 flags,
- struct iwl_mvm_scan_params *params)
+static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params)
{
int global_cnt = 0;
enum ieee80211_band band;
@@ -216,7 +198,6 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_condition_iterator,
&global_cnt);
-
if (!global_cnt)
goto not_bound;
@@ -224,8 +205,9 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
params->max_out_time = 120;
if (iwl_mvm_low_latency(mvm)) {
- if (mvm->fw->ucode_capa.api[0] &
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
params->suspend_time = 105;
/*
* If there is more than one active interface make
@@ -239,8 +221,9 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
}
}
- if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] &
- IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+ if (frag_passive_dwell &&
+ fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
/*
* P2P device scan should not be fragmented to avoid negative
* impact on P2P device discovery. Configure max_out_time to be
@@ -257,7 +240,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
}
}
- if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
+ if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+ (params->max_out_time > 200))
params->max_out_time = 200;
not_bound:
@@ -268,20 +252,34 @@ not_bound:
params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
band);
- params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
- n_ssids);
+ params->dwell[band].active =
+ iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
}
+
+ IWL_DEBUG_SCAN(mvm,
+ "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
+ params->max_out_time, params->suspend_time,
+ params->passive_fragmented);
+ IWL_DEBUG_SCAN(mvm,
+ "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
+ params->dwell[IEEE80211_BAND_2GHZ].passive,
+ params->dwell[IEEE80211_BAND_2GHZ].active,
+ params->dwell[IEEE80211_BAND_2GHZ].fragmented);
+ IWL_DEBUG_SCAN(mvm,
+ "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
+ params->dwell[IEEE80211_BAND_5GHZ].passive,
+ params->dwell[IEEE80211_BAND_5GHZ].active,
+ params->dwell[IEEE80211_BAND_5GHZ].fragmented);
}
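Concretely, with the BASIC_DWELL API the dwell times are fixed regardless of band or SSID count (active 10, passive 110); without it they scale with the request, e.g. a 2.4 GHz scan with two SSIDs dwells 20 + 3 * (2 + 1) = 29 on active channels and 100 + 20 = 120 on passive ones.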
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
{
/* require rrm scan whenever the fw supports it */
- return mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT;
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
}
-static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
- bool is_sched_scan)
+static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
{
int max_probe_len;
@@ -297,9 +295,9 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
return max_probe_len;
}
-int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
{
- int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);
+ int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
/* TODO: [BUG] This function should return the maximum allowed size of
* scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
@@ -314,22 +312,41 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
return max_ie_len;
}
-int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
+ int num_res, u8 *buf, size_t buf_size)
+{
+ int i;
+ u8 *pos = buf, *end = buf + buf_size;
+
+ for (i = 0; pos < end && i < num_res; i++)
+ pos += snprintf(pos, end - pos, " %u", res[i].channel);
+
+ /* terminate the string in case the buffer was too short */
+ *(buf + buf_size - 1) = '\0';
+
+ return buf;
+}
+
+int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
+ u8 buf[256];
IWL_DEBUG_SCAN(mvm,
- "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
- notif->status, notif->scanned_channels);
+ "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
+ notif->status, notif->scanned_channels,
+ iwl_mvm_dump_channel_list(notif->results,
+ notif->scanned_channels, buf,
+ sizeof(buf)));
return 0;
}
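For example, an iteration that visited channels 1, 6 and 11 would now log (status value illustrative):

    Scan offload iteration complete: status=0x1 scanned channels=3 channels list:  1 6 11

Each entry is rendered as " %u", which is why the list starts with a space.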
-int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
ieee80211_sched_scan_results(mvm->hw);
@@ -337,41 +354,78 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
return 0;
}
-int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb,
- struct iwl_device_cmd *cmd)
+static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_periodic_scan_complete *scan_notif;
+ switch (status) {
+ case IWL_SCAN_EBS_SUCCESS:
+ return "successful";
+ case IWL_SCAN_EBS_INACTIVE:
+ return "inactive";
+ case IWL_SCAN_EBS_FAILED:
+ case IWL_SCAN_EBS_CHAN_NOT_FOUND:
+ default:
+ return "failed";
+ }
+}
- scan_notif = (void *)pkt->data;
+int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
+ bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
/* scan status must be locked for proper checking */
lockdep_assert_held(&mvm->mutex);
- IWL_DEBUG_SCAN(mvm,
- "%s completed, status %s, EBS status %s\n",
- mvm->scan_status == IWL_MVM_SCAN_SCHED ?
- "Scheduled scan" : "Scan",
- scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
- "completed" : "aborted",
- scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
- "success" : "failed");
+ /* We first check if we were stopping a scan, in which case we
+ * just clear the stopping flag. Then we check if it was a
+ * firmware-initiated stop, in which case we need to inform
+ * mac80211.
+ * Note that we can have a stopping and a running scan
+ * simultaneously, but we can't have two different types of
+ * scans stopping or running at the same time (since LMAC
+ * doesn't support it).
+ */
+
+ if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
+ WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
+
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+ mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
+ IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
- /* only call mac80211 completion if the stop was initiated by FW */
- if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
+ WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
+
+ IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
ieee80211_sched_scan_stopped(mvm->hw);
- } else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+ IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
+ aborted ? "aborted" : "completed",
+ iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+ mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
ieee80211_scan_completed(mvm->hw,
scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
}
- if (scan_notif->ebs_status)
- mvm->last_ebs_successful = false;
+ mvm->last_ebs_successful =
+ scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
+ scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
return 0;
}
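To make the branch order concrete: a driver-initiated stop of a scheduled scan sets IWL_MVM_SCAN_STOPPING_SCHED, so the first branch clears it without notifying mac80211 (the caller already knows). If instead the firmware ends a scheduled scan on its own, only IWL_MVM_SCAN_SCHED is set, so the third branch clears it and calls ieee80211_sched_scan_stopped(). The regular-scan branches mirror this, with the extra scan reference released in the firmware-initiated case.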
@@ -390,9 +444,12 @@ static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
return -1;
}
-static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
- struct iwl_ssid_ie *direct_scan,
- u32 *ssid_bitmap, bool basic_ssid)
+/* We insert the SSIDs in an inverted order, because the FW will
+ * invert it back.
+ */
+static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
+ struct iwl_ssid_ie *ssids,
+ u32 *ssid_bitmap)
{
int i, j;
int index;
@@ -402,39 +459,41 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
* iwl_config_sched_scan_profiles() uses the order of these ssids to
* config match list.
*/
- for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+ for (i = 0, j = params->n_match_sets - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
/* skip empty SSID matchsets */
- if (!req->match_sets[i].ssid.ssid_len)
+ if (!params->match_sets[j].ssid.ssid_len)
continue;
- direct_scan[i].id = WLAN_EID_SSID;
- direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
- memcpy(direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
- direct_scan[i].len);
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->match_sets[j].ssid.ssid_len;
+ memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
+ ssids[i].len);
}
/* add SSIDs from scan SSID list */
*ssid_bitmap = 0;
- for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
- index = iwl_ssid_exist(req->ssids[j].ssid,
- req->ssids[j].ssid_len,
- direct_scan);
+ for (j = params->n_ssids - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
+ index = iwl_ssid_exist(params->ssids[j].ssid,
+ params->ssids[j].ssid_len,
+ ssids);
if (index < 0) {
- if (!req->ssids[j].ssid_len && basic_ssid)
- continue;
- direct_scan[i].id = WLAN_EID_SSID;
- direct_scan[i].len = req->ssids[j].ssid_len;
- memcpy(direct_scan[i].ssid, req->ssids[j].ssid,
- direct_scan[i].len);
- *ssid_bitmap |= BIT(i + 1);
- i++;
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->ssids[j].ssid_len;
+ memcpy(ssids[i].ssid, params->ssids[j].ssid,
+ ssids[i].len);
+ *ssid_bitmap |= BIT(i);
} else {
- *ssid_bitmap |= BIT(index + 1);
+ *ssid_bitmap |= BIT(index);
}
}
}
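A worked example of the slot and bitmap layout (standalone C, hypothetical counts): with two non-empty match sets and one fresh scan SSID, the match sets occupy slots 0-1 without setting bitmap bits, and the scan SSID lands in slot 2 and sets BIT(2). The LMAC caller later shifts the whole bitmap left by one because that API numbers SSIDs from bit 1 (see the shift further down in this patch).

    #include <stdio.h>

    int main(void)
    {
            unsigned int ssid_bitmap = 0;
            int slot = 2;                   /* first slot after the match sets */

            ssid_bitmap |= 1u << slot;      /* BIT(2) == 0x4 */
            printf("as built %#x, lmac command %#x\n",
                   ssid_bitmap, ssid_bitmap << 1);  /* 0x4, 0x8 */
            return 0;
    }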
-int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
- struct cfg80211_sched_scan_request *req)
+static int
+iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+ struct cfg80211_sched_scan_request *req)
{
struct iwl_scan_offload_profile *profile;
struct iwl_scan_offload_profile_cfg *profile_cfg;
@@ -515,30 +574,7 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
return true;
}
-int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies)
-{
- int ret;
-
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
- ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
- if (ret)
- return ret;
- ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
- } else {
- mvm->scan_status = IWL_MVM_SCAN_SCHED;
- ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
- if (ret)
- return ret;
- ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
- }
-
- return ret;
-}
-
-static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
+static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
{
int ret;
struct iwl_host_cmd cmd = {
@@ -546,12 +582,6 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
};
u32 status;
- /* Exit instantly with error when device is not ready
- * to receive scan abort command or it does not perform
- * scheduled scan currently */
- if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- return -EIO;
-
ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
if (ret)
return ret;
@@ -571,69 +601,9 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
return ret;
}
-int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
-{
- int ret;
- struct iwl_notification_wait wait_scan_done;
- static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
- bool sched = mvm->scan_status == IWL_MVM_SCAN_SCHED;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
- return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
- notify);
-
- if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- return 0;
-
- if (iwl_mvm_is_radio_killed(mvm)) {
- ret = 0;
- goto out;
- }
-
- iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
- scan_done_notif,
- ARRAY_SIZE(scan_done_notif),
- NULL, NULL);
-
- ret = iwl_mvm_send_scan_offload_abort(mvm);
- if (ret) {
- IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
- sched ? "offloaded " : "", ret);
- iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
- goto out;
- }
-
- IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
- sched ? "offloaded " : "");
-
- ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-out:
- /*
- * Clear the scan status so the next scan requests will succeed. This
- * also ensures the Rx handler doesn't do anything, as the scan was
- * stopped from above. Since the rx handler won't do anything now,
- * we have to release the scan reference here.
- */
- if (mvm->scan_status == IWL_MVM_SCAN_OS)
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-
- mvm->scan_status = IWL_MVM_SCAN_NONE;
-
- if (notify) {
- if (sched)
- ieee80211_sched_scan_stopped(mvm->hw);
- else
- ieee80211_scan_completed(mvm->hw, true);
- }
-
- return ret;
-}
-
-static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
- struct iwl_scan_req_tx_cmd *tx_cmd,
- bool no_cck)
+static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
+ struct iwl_scan_req_tx_cmd *tx_cmd,
+ bool no_cck)
{
tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
TX_CMD_FLG_BT_DIS);
@@ -654,7 +624,7 @@ static void
iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
struct ieee80211_channel **channels,
int n_channels, u32 ssid_bitmap,
- struct iwl_scan_req_unified_lmac *cmd)
+ struct iwl_scan_req_lmac *cmd)
{
struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
int i;
@@ -707,13 +677,14 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
}
static void
-iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_scan_ies *ies,
- struct iwl_scan_probe_req *preq,
- const u8 *mac_addr, const u8 *mac_addr_mask)
+iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_scan_ies *ies,
+ struct iwl_mvm_scan_params *params)
{
- struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;
+ struct ieee80211_mgmt *frame = (void *)params->preq.buf;
u8 *pos, *newpos;
+ const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ params->mac_addr : NULL;
/*
* Unfortunately, right now the offload scan doesn't support randomising
@@ -722,7 +693,8 @@ iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* random, only when it's restarted, but at least that helps a bit.
*/
if (mac_addr)
- get_random_mask_addr(frame->sa, mac_addr, mac_addr_mask);
+ get_random_mask_addr(frame->sa, mac_addr,
+ params->mac_addr_mask);
else
memcpy(frame->sa, vif->addr, ETH_ALEN);
@@ -735,245 +707,167 @@ iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*pos++ = WLAN_EID_SSID;
*pos++ = 0;
- preq->mac_header.offset = 0;
- preq->mac_header.len = cpu_to_le16(24 + 2);
+ params->preq.mac_header.offset = 0;
+ params->preq.mac_header.len = cpu_to_le16(24 + 2);
/* Insert ds parameter set element on 2.4 GHz band */
newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
ies->ies[IEEE80211_BAND_2GHZ],
ies->len[IEEE80211_BAND_2GHZ],
pos);
- preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
- preq->band_data[0].len = cpu_to_le16(newpos - pos);
+ params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
pos = newpos;
memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
ies->len[IEEE80211_BAND_5GHZ]);
- preq->band_data[1].offset = cpu_to_le16(pos - preq->buf);
- preq->band_data[1].len = cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
+ params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[1].len =
+ cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
pos += ies->len[IEEE80211_BAND_5GHZ];
memcpy(pos, ies->common_ies, ies->common_ie_len);
- preq->common_data.offset = cpu_to_le16(pos - preq->buf);
- preq->common_data.len = cpu_to_le16(ies->common_ie_len);
+ params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
}
-static void
-iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
- struct iwl_scan_req_unified_lmac *cmd,
- struct iwl_mvm_scan_params *params)
+static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
+ enum iwl_scan_priority_ext prio)
+{
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
+ return cpu_to_le32(prio);
+
+ if (prio <= IWL_SCAN_PRIORITY_EXT_2)
+ return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
+
+ if (prio <= IWL_SCAN_PRIORITY_EXT_4)
+ return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
+
+ return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+}
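Assuming the extended priorities are consecutive enumerators (EXT_0 through EXT_7), the fallback collapses them into the three legacy levels: EXT_0 to EXT_2 become LOW, EXT_3 and EXT_4 become MEDIUM, and EXT_5 and above become HIGH. The IWL_SCAN_PRIORITY_EXT_6 used for the LMAC dwell below therefore degrades to IWL_SCAN_PRIORITY_HIGH on firmware without the EXT_SCAN_PRIORITY API.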
+
+static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
+ struct iwl_scan_req_lmac *cmd,
+ struct iwl_mvm_scan_params *params)
{
- memset(cmd, 0, ksize(cmd));
cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
if (params->passive_fragmented)
cmd->fragmented_dwell =
params->dwell[IEEE80211_BAND_2GHZ].fragmented;
- cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
cmd->max_out_time = cpu_to_le32(params->max_out_time);
cmd->suspend_time = cpu_to_le32(params->suspend_time);
- cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
- cmd->iter_num = cpu_to_le32(1);
-
- if (iwl_mvm_rrm_scan_needed(mvm))
- cmd->scan_flags |=
- cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
+ cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
}
-int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_scan_request *req)
+static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
+ struct ieee80211_scan_ies *ies,
+ int n_channels)
{
- struct iwl_host_cmd hcmd = {
- .id = SCAN_OFFLOAD_REQUEST_CMD,
- .len = { sizeof(struct iwl_scan_req_unified_lmac) +
- sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels +
- sizeof(struct iwl_scan_probe_req), },
- .data = { mvm->scan_cmd, },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
- struct iwl_scan_probe_req *preq;
- struct iwl_mvm_scan_params params = {};
- u32 flags;
- u32 ssid_bitmap = 0;
- int ret, i;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* we should have failed registration if scan_cmd was NULL */
- if (WARN_ON(mvm->scan_cmd == NULL))
- return -ENOMEM;
-
- if (req->req.n_ssids > PROBE_OPTION_MAX ||
- req->ies.common_ie_len + req->ies.len[NL80211_BAND_2GHZ] +
- req->ies.len[NL80211_BAND_5GHZ] >
- iwl_mvm_max_scan_ie_fw_cmd_room(mvm, false) ||
- req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
- return -ENOBUFS;
+ return ((n_ssids <= PROBE_OPTION_MAX) &&
+ (n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
+ (ies->common_ie_len +
+ ies->len[NL80211_BAND_2GHZ] +
+ ies->len[NL80211_BAND_5GHZ] <=
+ iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
+}
- mvm->scan_status = IWL_MVM_SCAN_OS;
+static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int n_iterations)
+{
+ const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
- iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
- &params);
+ /* We can only use EBS if:
+ * 1. the feature is supported;
+ * 2. the last EBS was successful;
+ * 3. if only single scan, the single scan EBS API is supported;
+ * 4. it's not a p2p find operation.
+ */
+ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
+ mvm->last_ebs_successful &&
+ (n_iterations > 1 ||
+ fw_has_api(capa, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE);
+}
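For example, a one-shot regular scan totals a single iteration, so condition 3 only holds on firmware advertising IWL_UCODE_TLV_API_SINGLE_SCAN_EBS; a scheduled scan iterates more than once and can use EBS even without that API, provided the last EBS attempt succeeded and the scan is not a P2P device find.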
- iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);
+static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
+{
+ return params->schedule[0].iterations + params->schedule[1].iterations;
+}
- cmd->n_channels = (u8)req->req.n_channels;
+static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params)
+{
+ int flags = 0;
- flags = IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+ if (params->n_ssids == 0)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
- if (req->req.n_ssids == 1 && req->req.ssids[0].ssid_len != 0)
+ if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
- if (params.passive_fragmented)
+ if (params->passive_fragmented)
flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
- if (req->req.n_ssids == 0)
- flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
-
- cmd->scan_flags |= cpu_to_le32(flags);
-
- cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band);
- cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
- MAC_FILTER_IN_BEACON);
- iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, req->req.no_cck);
- iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->req.ssids,
- req->req.n_ssids, 0);
-
- cmd->schedule[0].delay = 0;
- cmd->schedule[0].iterations = 1;
- cmd->schedule[0].full_scan_mul = 0;
- cmd->schedule[1].delay = 0;
- cmd->schedule[1].iterations = 0;
- cmd->schedule[1].full_scan_mul = 0;
-
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
- mvm->last_ebs_successful) {
- cmd->channel_opt[0].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- cmd->channel_opt[0].non_ebs_ratio =
- cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
- cmd->channel_opt[1].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- cmd->channel_opt[1].non_ebs_ratio =
- cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
- }
-
- for (i = 1; i <= req->req.n_ssids; i++)
- ssid_bitmap |= BIT(i);
-
- iwl_mvm_lmac_scan_cfg_channels(mvm, req->req.channels,
- req->req.n_channels, ssid_bitmap,
- cmd);
+ if (iwl_mvm_rrm_scan_needed(mvm))
+ flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
- preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels);
+ if (params->pass_all)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+ else
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
- iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq,
- req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
- req->req.mac_addr : NULL,
- req->req.mac_addr_mask);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->scan_iter_notif_enabled)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+#endif
- ret = iwl_mvm_send_cmd(mvm, &hcmd);
- if (!ret) {
- IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
- } else {
- /*
- * If the scan failed, it usually means that the FW was unable
- * to allocate the time events. Warn on it, but maybe we
- * should try to send the command again with different params.
- */
- IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
- ret = -EIO;
- }
- return ret;
+ return flags;
}
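As an illustration, a passive (n_ssids == 0), pass-all request on RRM-capable firmware yields IWL_MVM_LMAC_SCAN_FLAG_PASSIVE | IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED | IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL, plus ITER_COMPLETE when the debugfs iteration notifications are enabled.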
-int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies)
+static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params)
{
- struct iwl_host_cmd hcmd = {
- .id = SCAN_OFFLOAD_REQUEST_CMD,
- .len = { sizeof(struct iwl_scan_req_unified_lmac) +
- sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels +
- sizeof(struct iwl_scan_probe_req), },
- .data = { mvm->scan_cmd, },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
- struct iwl_scan_probe_req *preq;
- struct iwl_mvm_scan_params params = {};
- int ret;
- u32 flags = 0, ssid_bitmap = 0;
+ struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
+ struct iwl_scan_probe_req *preq =
+ (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
+ mvm->fw->ucode_capa.n_scan_channels);
+ u32 ssid_bitmap = 0;
+ int n_iterations = iwl_mvm_scan_total_iterations(params);
lockdep_assert_held(&mvm->mutex);
- /* we should have failed registration if scan_cmd was NULL */
- if (WARN_ON(mvm->scan_cmd == NULL))
- return -ENOMEM;
-
- if (req->n_ssids > PROBE_OPTION_MAX ||
- ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
- ies->len[NL80211_BAND_5GHZ] >
- iwl_mvm_max_scan_ie_fw_cmd_room(mvm, true) ||
- req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
- return -ENOBUFS;
-
- iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
-
- iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);
-
- cmd->n_channels = (u8)req->n_channels;
-
- cmd->delay = cpu_to_le32(req->delay);
-
- if (iwl_mvm_scan_pass_all(mvm, req))
- flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
- else
- flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
+ memset(cmd, 0, ksize(cmd));
- if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
- flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
+ iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
- if (params.passive_fragmented)
- flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
-
- if (req->n_ssids == 0)
- flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
+ cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
+ cmd->iter_num = cpu_to_le32(1);
+ cmd->n_channels = (u8)params->n_channels;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvm->scan_iter_notif_enabled)
- flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
-#endif
+ cmd->delay = cpu_to_le32(params->delay);
- cmd->scan_flags |= cpu_to_le32(flags);
+ cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params));
- cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
+ cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
- iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, false);
- iwl_scan_offload_build_ssid(req, cmd->direct_scan, &ssid_bitmap, false);
+ iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
+ iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
- cmd->schedule[0].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
- cmd->schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
- cmd->schedule[0].full_scan_mul = 1;
+ /* this API uses bits 1-20 instead of 0-19 */
+ ssid_bitmap <<= 1;
- cmd->schedule[1].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
- cmd->schedule[1].iterations = 0xff;
- cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+ cmd->schedule[0].delay = cpu_to_le16(params->interval);
+ cmd->schedule[0].iterations = params->schedule[0].iterations;
+ cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+ cmd->schedule[1].delay = cpu_to_le16(params->interval);
+ cmd->schedule[1].iterations = params->schedule[1].iterations;
+	cmd->schedule[1].full_scan_mul = params->schedule[1].full_scan_mul;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
- mvm->last_ebs_successful) {
+ if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
cmd->channel_opt[0].flags =
cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@ -988,61 +882,14 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
}
- iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
- ssid_bitmap, cmd);
-
- preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
- mvm->fw->ucode_capa.n_scan_channels);
+ iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
+ params->n_channels, ssid_bitmap, cmd);
- iwl_mvm_build_unified_scan_probe(mvm, vif, ies, preq,
- req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
- req->mac_addr : NULL,
- req->mac_addr_mask);
+ *preq = params->preq;
- ret = iwl_mvm_send_cmd(mvm, &hcmd);
- if (!ret) {
- IWL_DEBUG_SCAN(mvm,
- "Sched scan request was sent successfully\n");
- } else {
- /*
- * If the scan failed, it usually means that the FW was unable
- * to allocate the time events. Warn on it, but maybe we
- * should try to send the command again with different params.
- */
- IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
- ret = -EIO;
- }
- return ret;
-}
-
-
-int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
-{
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
- return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,
- true);
-
- if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- return 0;
-
- if (iwl_mvm_is_radio_killed(mvm)) {
- ieee80211_scan_completed(mvm->hw, true);
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- mvm->scan_status = IWL_MVM_SCAN_NONE;
- return 0;
- }
-
- return iwl_mvm_scan_offload_stop(mvm, true);
+ return 0;
}
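
The probe request has no named member in the LMAC command: it lives in cmd->data immediately after the variable-length channel-config array, which is why preq above is derived by pointer arithmetic. A minimal user-space sketch of that layout, using hypothetical struct sizes in place of the real iwlwifi definitions:

    #include <stdio.h>
    #include <stddef.h>

    /* hypothetical stand-ins for the real iwlwifi structs */
    struct channel_cfg { unsigned char raw[20]; };
    struct probe_req   { unsigned char raw[64]; };

    int main(void)
    {
    	int n_scan_channels = 8;	/* fw->ucode_capa.n_scan_channels */
    	unsigned char data[8 * sizeof(struct channel_cfg) +
    			   sizeof(struct probe_req)];

    	/* the probe request starts right after n_scan_channels
    	 * channel-config entries, exactly as iwl_mvm_scan_lmac
    	 * derives preq from cmd->data */
    	struct probe_req *preq =
    		(void *)(data + sizeof(struct channel_cfg) * n_scan_channels);

    	printf("probe req offset = %zu\n",
    	       (size_t)((unsigned char *)preq - data));
    	return 0;
    }
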
-/* UMAC scan API */
-
-struct iwl_umac_scan_done {
- struct iwl_mvm *mvm;
- enum iwl_umac_scan_uid_type type;
-};
-
static int rate_to_scan_rate_flag(unsigned int rate)
{
static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
@@ -1151,79 +998,21 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
return ret;
}
-static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
-{
- int i;
-
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
- if (mvm->scan_uid[i] == uid)
- return i;
-
- return i;
-}
-
-static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
-{
- return iwl_mvm_find_scan_uid(mvm, 0);
-}
-
-static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
- enum iwl_umac_scan_uid_type type)
-{
- int i;
-
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
- if (mvm->scan_uid[i] & type)
- return true;
-
- return false;
-}
-
-static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm,
- enum iwl_umac_scan_uid_type type)
+static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
{
int i;
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
- if (mvm->scan_uid[i] & type)
+ for (i = 0; i < mvm->max_scans; i++)
+ if (mvm->scan_uid_status[i] == status)
return i;
- return i;
+ return -ENOENT;
}
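
The scan_uid_status array doubles as the allocation table: a slot holds 0 while free, the scan type while that UID is running, and the type shifted into the stopping range while an abort is in flight. A hedged sketch of that convention; the constant values here are assumptions, not the driver's actual definitions:

    #include <stdio.h>

    #define SCAN_REGULAR	(1 << 0)	/* assumed IWL_MVM_SCAN_REGULAR */
    #define STOPPING_SHIFT	8		/* assumed IWL_MVM_SCAN_STOPPING_SHIFT */
    #define MAX_SCANS	4

    /* same linear search as iwl_mvm_scan_uid_by_status */
    static int uid_by_status(const int *uid_status, int status)
    {
    	for (int i = 0; i < MAX_SCANS; i++)
    		if (uid_status[i] == status)
    			return i;
    	return -1;			/* -ENOENT in the driver */
    }

    int main(void)
    {
    	int uid_status[MAX_SCANS] = { 0 };
    	int uid = uid_by_status(uid_status, 0);		/* grab a free slot */

    	uid_status[uid] = SCAN_REGULAR;			/* scan started */
    	uid_status[uid] = SCAN_REGULAR << STOPPING_SHIFT; /* abort acked */
    	uid_status[uid] = 0;				/* completion notif */

    	printf("slot %d is free again: %d\n", uid,
    	       uid_by_status(uid_status, 0) == uid);
    	return 0;
    }
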
-static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
- enum iwl_umac_scan_uid_type type)
-{
- u32 uid;
-
- /* make sure exactly one bit is on in scan type */
- WARN_ON(hweight8(type) != 1);
-
- /*
- * Make sure scan uids are unique. If one scan lasts long time while
- * others are completing frequently, the seq number will wrap up and
- * we may have more than one scan with the same uid.
- */
- do {
- uid = type | (mvm->scan_seq_num <<
- IWL_UMAC_SCAN_UID_SEQ_OFFSET);
- mvm->scan_seq_num++;
- } while (iwl_mvm_find_scan_uid(mvm, uid) <
- IWL_MVM_MAX_SIMULTANEOUS_SCANS);
-
- IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);
-
- return uid;
-}
-
-static void
-iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
+static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
struct iwl_scan_req_umac *cmd,
struct iwl_mvm_scan_params *params)
{
- memset(cmd, 0, ksize(cmd));
- cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
- sizeof(struct iwl_mvm_umac_cmd_hdr));
cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
if (params->passive_fragmented)
@@ -1231,7 +1020,15 @@ iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
params->dwell[IEEE80211_BAND_2GHZ].fragmented;
cmd->max_out_time = cpu_to_le32(params->max_out_time);
cmd->suspend_time = cpu_to_le32(params->suspend_time);
- cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+ cmd->scan_priority =
+ iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+
+ if (iwl_mvm_scan_total_iterations(params) == 0)
+ cmd->ooc_priority =
+ iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+ else
+ cmd->ooc_priority =
+ iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
}
static void
@@ -1251,230 +1048,326 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
}
}
-static u32 iwl_mvm_scan_umac_common_flags(struct iwl_mvm *mvm, int n_ssids,
- struct cfg80211_ssid *ssids,
- int fragmented)
+static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
+ struct iwl_mvm_scan_params *params)
{
int flags = 0;
- if (n_ssids == 0)
+ if (params->n_ssids == 0)
flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
- if (n_ssids == 1 && ssids[0].ssid_len != 0)
+ if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
- if (fragmented)
+ if (params->passive_fragmented)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
+ if (params->pass_all)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+
+ if (iwl_mvm_scan_total_iterations(params) > 1)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvm->scan_iter_notif_enabled)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+#endif
return flags;
}
-int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_scan_request *req)
+static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_scan_params *params,
+ int type)
{
- struct iwl_host_cmd hcmd = {
- .id = SCAN_REQ_UMAC,
- .len = { iwl_mvm_scan_size(mvm), },
- .data = { mvm->scan_cmd, },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
- struct iwl_mvm_scan_params params = {};
- u32 uid, flags;
+ int uid;
u32 ssid_bitmap = 0;
- int ret, i, uid_idx;
+ int n_iterations = iwl_mvm_scan_total_iterations(params);
lockdep_assert_held(&mvm->mutex);
- uid_idx = iwl_mvm_find_free_scan_uid(mvm);
- if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
- return -EBUSY;
+ uid = iwl_mvm_scan_uid_by_status(mvm, 0);
+ if (uid < 0)
+ return uid;
- /* we should have failed registration if scan_cmd was NULL */
- if (WARN_ON(mvm->scan_cmd == NULL))
- return -ENOMEM;
-
- if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX ||
- req->ies.common_ie_len +
- req->ies.len[NL80211_BAND_2GHZ] +
- req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 >
- SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels >
- mvm->fw->ucode_capa.n_scan_channels))
- return -ENOBUFS;
+ memset(cmd, 0, ksize(cmd));
+ cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
+ sizeof(struct iwl_mvm_umac_cmd_hdr));
- iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
- &params);
+ iwl_mvm_scan_umac_dwell(mvm, cmd, params);
- iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
+ mvm->scan_uid_status[uid] = type;
- uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
- mvm->scan_uid[uid_idx] = uid;
cmd->uid = cpu_to_le32(uid);
+ cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
-
- flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids,
- req->req.ssids,
- params.passive_fragmented);
-
- flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
-
- cmd->general_flags = cpu_to_le32(flags);
-
- if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
- mvm->last_ebs_successful)
+ if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
- cmd->n_channels = req->req.n_channels;
+ cmd->n_channels = params->n_channels;
+
+ iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
- for (i = 0; i < req->req.n_ssids; i++)
- ssid_bitmap |= BIT(i);
+ iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
+ params->n_channels, ssid_bitmap, cmd);
- iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels,
- req->req.n_channels, ssid_bitmap, cmd);
+ /* With UMAC we use only one schedule for now, so use the sum
+	 * of the iterations (with a maximum of 255).
+ */
+ sec_part->schedule[0].iter_count =
+ (n_iterations > 255) ? 255 : n_iterations;
+ sec_part->schedule[0].interval = cpu_to_le16(params->interval);
- sec_part->schedule[0].iter_count = 1;
- sec_part->delay = 0;
+ sec_part->delay = cpu_to_le16(params->delay);
+ sec_part->preq = params->preq;
- iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq,
- req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
- req->req.mac_addr : NULL,
- req->req.mac_addr_mask);
+ return 0;
+}
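
Because the UMAC command carries a single schedule line, the two requested schedule entries are folded into one iteration count and saturated at 255, the largest value the 8-bit iter_count field can hold. A small sketch of that fold; treating the total as the plain sum of the two entries is an assumption that mirrors the comment above:

    #include <stdio.h>

    struct sched_entry { int iterations; };

    int main(void)
    {
    	/* a sched-scan style request: a few fast passes, then "forever" */
    	struct sched_entry schedule[2] = { { 3 }, { 0xff } };

    	int n_iterations = schedule[0].iterations + schedule[1].iterations;
    	unsigned char iter_count = n_iterations > 255 ? 255 : n_iterations;

    	printf("requested %d iterations, programmed %u\n",
    	       n_iterations, iter_count);
    	return 0;
    }
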
- iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids,
- req->req.n_ssids, 0);
+static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
+{
+ return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
+}
- ret = iwl_mvm_send_cmd(mvm, &hcmd);
- if (!ret) {
- IWL_DEBUG_SCAN(mvm,
- "Scan request was sent successfully\n");
- } else {
- /*
- * If the scan failed, it usually means that the FW was unable
- * to allocate the time events. Warn on it, but maybe we
- * should try to send the command again with different params.
- */
- IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
+{
+ /* This looks a bit arbitrary, but the idea is that if we run
+ * out of possible simultaneous scans and the userspace is
+ * trying to run a scan type that is already running, we
+ * return -EBUSY. But if the userspace wants to start a
+ * different type of scan, we stop the opposite type to make
+ * space for the new request. The reason is backwards
+ * compatibility with old wpa_supplicant that wouldn't stop a
+ * scheduled scan before starting a normal scan.
+ */
+
+ if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
+ return 0;
+
+	/* Use a switch, even though this is a bitmask, so that more
+	 * than one bit set will fall into the default case and we
+	 * will warn.
+	 */
+ switch (type) {
+ case IWL_MVM_SCAN_REGULAR:
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+ return -EBUSY;
+ return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+ case IWL_MVM_SCAN_SCHED:
+ if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+ return -EBUSY;
+		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+ case IWL_MVM_SCAN_NETDETECT:
+ /* No need to stop anything for net-detect since the
+ * firmware is restarted anyway. This way, any sched
+ * scans that were running will be restarted when we
+ * resume.
+ */
+ return 0;
+ default:
+ WARN_ON(1);
+ break;
}
- return ret;
+
+ return -EIO;
}
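
The eviction policy above is easiest to see as three cases: a free slot, the same type already running, or the opposite type running. A compact user-space restatement, with -EBUSY spelled out and the stop call stubbed:

    #include <stdio.h>

    #define SCAN_REGULAR	(1 << 0)
    #define SCAN_SCHED	(1 << 1)
    #define MAX_SCANS	1		/* one slot, to force the policy */
    #define EBUSY_ERR	16

    static int stop_scan(int type)
    {
    	printf("stopping scan type %d\n", type);
    	return 0;
    }

    static int check_running(int scan_status, int new_type)
    {
    	if (__builtin_popcount(scan_status) < MAX_SCANS)
    		return 0;			/* room left */
    	if (scan_status & new_type)
    		return -EBUSY_ERR;		/* same type already runs */
    	return stop_scan(scan_status);		/* evict the other type */
    }

    int main(void)
    {
    	printf("%d\n", check_running(SCAN_SCHED, SCAN_REGULAR)); /* evicts */
    	printf("%d\n", check_running(SCAN_SCHED, SCAN_SCHED));   /* -16 */
    	return 0;
    }
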
-int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_sched_scan_request *req,
- struct ieee80211_scan_ies *ies)
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies)
{
-
struct iwl_host_cmd hcmd = {
- .id = SCAN_REQ_UMAC,
.len = { iwl_mvm_scan_size(mvm), },
.data = { mvm->scan_cmd, },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
- struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
- struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
- sizeof(struct iwl_scan_channel_cfg_umac) *
- mvm->fw->ucode_capa.n_scan_channels;
struct iwl_mvm_scan_params params = {};
- u32 uid, flags;
- u32 ssid_bitmap = 0;
- int ret, uid_idx;
+ int ret;
lockdep_assert_held(&mvm->mutex);
- uid_idx = iwl_mvm_find_free_scan_uid(mvm);
- if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+ IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
return -EBUSY;
+ }
+
+ ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
+ if (ret)
+ return ret;
+
+ iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
/* we should have failed registration if scan_cmd was NULL */
- if (WARN_ON(mvm->scan_cmd == NULL))
+ if (WARN_ON(!mvm->scan_cmd))
return -ENOMEM;
- if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX ||
- ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
- ies->len[NL80211_BAND_5GHZ] + 24 + 2 >
- SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels >
- mvm->fw->ucode_capa.n_scan_channels))
+ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
return -ENOBUFS;
- iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags,
- &params);
-
- iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
-
- cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
-
- uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
- mvm->scan_uid[uid_idx] = uid;
- cmd->uid = cpu_to_le32(uid);
+ params.n_ssids = req->n_ssids;
+ params.flags = req->flags;
+ params.n_channels = req->n_channels;
+ params.delay = 0;
+ params.interval = 0;
+ params.ssids = req->ssids;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = req->no_cck;
+ params.pass_all = true;
+ params.n_match_sets = 0;
+ params.match_sets = NULL;
+
+ params.schedule[0].iterations = 1;
+ params.schedule[0].full_scan_mul = 0;
+ params.schedule[1].iterations = 0;
+ params.schedule[1].full_scan_mul = 0;
+
+ iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+
+ iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ hcmd.id = SCAN_REQ_UMAC;
+ ret = iwl_mvm_scan_umac(mvm, vif, &params,
+ IWL_MVM_SCAN_REGULAR);
+ } else {
+ hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+ ret = iwl_mvm_scan_lmac(mvm, vif, &params);
+ }
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
+ if (ret)
+ return ret;
- flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids,
- params.passive_fragmented);
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ if (!ret) {
+ IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+ mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
+ } else {
+ /* If the scan failed, it usually means that the FW was unable
+ * to allocate the time events. Warn on it, but maybe we
+ * should try to send the command again with different params.
+ */
+ IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+ }
- flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+ if (ret)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- if (iwl_mvm_scan_pass_all(mvm, req))
- flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
- else
- flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+ return ret;
+}
- cmd->general_flags = cpu_to_le32(flags);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type)
+{
+ struct iwl_host_cmd hcmd = {
+ .len = { iwl_mvm_scan_size(mvm), },
+ .data = { mvm->scan_cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_mvm_scan_params params = {};
+ int ret;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
- mvm->last_ebs_successful)
- cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+ lockdep_assert_held(&mvm->mutex);
- cmd->n_channels = req->n_channels;
+ if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+ IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
+ return -EBUSY;
+ }
- iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap,
- false);
+ ret = iwl_mvm_check_running_scans(mvm, type);
+ if (ret)
+ return ret;
- /* This API uses bits 0-19 instead of 1-20. */
- ssid_bitmap = ssid_bitmap >> 1;
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(!mvm->scan_cmd))
+ return -ENOMEM;
- iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels,
- ssid_bitmap, cmd);
+ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
+ return -ENOBUFS;
- sec_part->schedule[0].interval =
- cpu_to_le16(req->interval / MSEC_PER_SEC);
- sec_part->schedule[0].iter_count = 0xff;
+ params.n_ssids = req->n_ssids;
+ params.flags = req->flags;
+ params.n_channels = req->n_channels;
+ params.ssids = req->ssids;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = false;
+ params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
+ params.n_match_sets = req->n_match_sets;
+ params.match_sets = req->match_sets;
+
+ params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
+ params.schedule[0].full_scan_mul = 1;
+ params.schedule[1].iterations = 0xff;
+ params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+
+ if (req->interval > U16_MAX) {
+ IWL_DEBUG_SCAN(mvm,
+ "interval value is > 16-bits, set to max possible\n");
+ params.interval = U16_MAX;
+ } else {
+ params.interval = req->interval / MSEC_PER_SEC;
+ }
+ /* In theory, LMAC scans can handle a 32-bit delay, but since
+ * waiting for over 18 hours to start the scan is a bit silly
+ * and to keep it aligned with UMAC scans (which only support
+	 * 16-bit delays), trim it down to 16 bits.
+ */
if (req->delay > U16_MAX) {
IWL_DEBUG_SCAN(mvm,
"delay value is > 16-bits, set to max possible\n");
- sec_part->delay = cpu_to_le16(U16_MAX);
+ params.delay = U16_MAX;
+ } else {
+ params.delay = req->delay;
+ }
+
+ iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+
+ ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+ if (ret)
+ return ret;
+
+ iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ hcmd.id = SCAN_REQ_UMAC;
+ ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
} else {
- sec_part->delay = cpu_to_le16(req->delay);
+ hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+ ret = iwl_mvm_scan_lmac(mvm, vif, &params);
}
- iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
- req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
- req->mac_addr : NULL,
- req->mac_addr_mask);
+ if (ret)
+ return ret;
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
IWL_DEBUG_SCAN(mvm,
"Sched scan request was sent successfully\n");
+ mvm->scan_status |= type;
} else {
- /*
- * If the scan failed, it usually means that the FW was unable
+ /* If the scan failed, it usually means that the FW was unable
* to allocate the time events. Warn on it, but maybe we
* should try to send the command again with different params.
*/
IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
}
+
return ret;
}
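
Both the sched-scan interval and the delay travel in 16-bit firmware fields, which is all the two clamps above enforce. The same guard, as a tiny standalone helper:

    #include <stdio.h>
    #include <stdint.h>

    /* clamp a requested 32-bit value into a 16-bit firmware field,
     * as the interval and delay checks above do */
    static uint16_t clamp_u16(uint32_t req)
    {
    	return req > UINT16_MAX ? UINT16_MAX : (uint16_t)req;
    }

    int main(void)
    {
    	printf("%u\n", clamp_u16(70000));	/* 65535, after the warning */
    	printf("%u\n", clamp_u16(300));		/* unchanged */
    	return 0;
    }
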
@@ -1485,150 +1378,124 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
- bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
- int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);
+ bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
- /*
- * Scan uid may be set to zero in case of scan abort request from above.
- */
- if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+ if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
return 0;
+ /* if the scan is already stopping, we don't need to notify mac80211 */
+ if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
+ ieee80211_scan_completed(mvm->hw, aborted);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
+ ieee80211_sched_scan_stopped(mvm->hw);
+ }
+
+ mvm->scan_status &= ~mvm->scan_uid_status[uid];
+
IWL_DEBUG_SCAN(mvm,
- "Scan completed, uid %u type %s, status %s, EBS status %s\n",
- uid, sched ? "sched" : "regular",
+ "Scan completed, uid %u type %u, status %s, EBS status %s\n",
+ uid, mvm->scan_uid_status[uid],
notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted",
- notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
- "success" : "failed");
+ iwl_mvm_ebs_status_str(notif->ebs_status));
- if (notif->ebs_status)
+ if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+ notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
mvm->last_ebs_successful = false;
- mvm->scan_uid[uid_idx] = 0;
-
- if (!sched) {
- ieee80211_scan_completed(mvm->hw,
- notif->status ==
- IWL_SCAN_OFFLOAD_ABORTED);
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- } else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
- ieee80211_sched_scan_stopped(mvm->hw);
- } else {
- IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
- }
+ mvm->scan_uid_status[uid] = 0;
return 0;
}
-static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
- struct iwl_rx_packet *pkt, void *data)
+int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
{
- struct iwl_umac_scan_done *scan_done = data;
- struct iwl_umac_scan_complete *notif = (void *)pkt->data;
- u32 uid = __le32_to_cpu(notif->uid);
- int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);
-
- if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
- return false;
-
- if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
- return false;
-
- /*
- * Clear scan uid of scans that was aborted from above and completed
- * in FW so the RX handler does nothing. Set last_ebs_successful here if
- * needed.
- */
- scan_done->mvm->scan_uid[uid_idx] = 0;
-
- if (notif->ebs_status)
- scan_done->mvm->last_ebs_successful = false;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
+ u8 buf[256];
- return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
+ IWL_DEBUG_SCAN(mvm,
+ "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
+ notif->status, notif->scanned_channels,
+ iwl_mvm_dump_channel_list(notif->results,
+ notif->scanned_channels, buf,
+ sizeof(buf)));
+ return 0;
}
-static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
+static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
{
struct iwl_umac_scan_abort cmd = {
.hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
sizeof(struct iwl_mvm_umac_cmd_hdr)),
- .uid = cpu_to_le32(uid),
};
+ int uid, ret;
lockdep_assert_held(&mvm->mutex);
+ /* We should always get a valid index here, because we already
+ * checked that this type of scan was running in the generic
+ * code.
+ */
+ uid = iwl_mvm_scan_uid_by_status(mvm, type);
+ if (WARN_ON_ONCE(uid < 0))
+ return uid;
+
+ cmd.uid = cpu_to_le32(uid);
+
IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
- return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+ if (!ret)
+ mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+
+ return ret;
}
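
mvm->scan_status keeps a parallel "stopping" region STOPPING_SHIFT bits above the running bits, so the completion handler can distinguish an abort the driver asked for from a spontaneous completion. A sketch of the transitions; the bit layout and shift value are assumptions:

    #include <stdio.h>

    #define SCAN_SCHED	(1 << 1)	/* assumed bit layout */
    #define STOPPING_SHIFT	8		/* assumed IWL_MVM_SCAN_STOPPING_SHIFT */

    int main(void)
    {
    	unsigned int scan_status = 0;

    	scan_status |= SCAN_SCHED;			/* scan started */
    	scan_status |= SCAN_SCHED << STOPPING_SHIFT;	/* abort sent */
    	scan_status &= ~SCAN_SCHED;			/* no longer running */

    	printf("running %d, stopping %d\n",
    	       !!(scan_status & SCAN_SCHED),
    	       !!(scan_status & (SCAN_SCHED << STOPPING_SHIFT)));
    	return 0;
    }
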
-static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
- enum iwl_umac_scan_uid_type type, bool notify)
+static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
{
struct iwl_notification_wait wait_scan_done;
- static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
- struct iwl_umac_scan_done scan_done = {
- .mvm = mvm,
- .type = type,
- };
- int i, ret = -EIO;
+ static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+ SCAN_OFFLOAD_COMPLETE, };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
scan_done_notif,
ARRAY_SIZE(scan_done_notif),
- iwl_scan_umac_done_check, &scan_done);
+ NULL, NULL);
IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
- if (mvm->scan_uid[i] & type) {
- int err;
-
- if (iwl_mvm_is_radio_killed(mvm) &&
- (type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
- ieee80211_scan_completed(mvm->hw, true);
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- break;
- }
-
- err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
- if (!err)
- ret = 0;
- }
- }
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+ ret = iwl_mvm_umac_scan_abort(mvm, type);
+ else
+ ret = iwl_mvm_lmac_scan_abort(mvm);
if (ret) {
- IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
+ IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
return ret;
}
ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
- if (ret)
- return ret;
-
- if (notify) {
- if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
- ieee80211_sched_scan_stopped(mvm->hw);
- if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
- ieee80211_scan_completed(mvm->hw, true);
- iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- }
- }
return ret;
}
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return sizeof(struct iwl_scan_req_umac) +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels +
sizeof(struct iwl_scan_req_umac_tail);
- return sizeof(struct iwl_scan_req_unified_lmac) +
+ return sizeof(struct iwl_scan_req_lmac) +
sizeof(struct iwl_scan_channel_cfg_lmac) *
mvm->fw->ucode_capa.n_scan_channels +
sizeof(struct iwl_scan_probe_req);
@@ -1640,47 +1507,76 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
*/
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
{
- if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
- u32 uid, i;
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ int uid, i;
- uid = iwl_mvm_find_first_scan(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
- if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS) {
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
+ if (uid >= 0) {
ieee80211_scan_completed(mvm->hw, true);
- mvm->scan_uid[uid] = 0;
+ mvm->scan_uid_status[uid] = 0;
}
- uid = iwl_mvm_find_first_scan(mvm,
- IWL_UMAC_SCAN_UID_SCHED_SCAN);
- if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS && !mvm->restart_fw) {
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
+ if (uid >= 0 && !mvm->restart_fw) {
ieee80211_sched_scan_stopped(mvm->hw);
- mvm->scan_uid[uid] = 0;
+ mvm->scan_uid_status[uid] = 0;
}
/* We shouldn't have any UIDs still set. Loop over all the
* UIDs to make sure there's nothing left there and warn if
* any is found.
*/
- for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
- if (WARN_ONCE(mvm->scan_uid[i],
- "UMAC scan UID %d was not cleaned\n",
- mvm->scan_uid[i]))
- mvm->scan_uid[i] = 0;
+ for (i = 0; i < mvm->max_scans; i++) {
+ if (WARN_ONCE(mvm->scan_uid_status[i],
+ "UMAC scan UID %d status was not cleaned\n",
+ i))
+ mvm->scan_uid_status[i] = 0;
}
} else {
- switch (mvm->scan_status) {
- case IWL_MVM_SCAN_NONE:
- break;
- case IWL_MVM_SCAN_OS:
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
ieee80211_scan_completed(mvm->hw, true);
- break;
- case IWL_MVM_SCAN_SCHED:
- /*
- * Sched scan will be restarted by mac80211 in
- * restart_hw, so do not report if FW is about to be
- * restarted.
- */
- if (!mvm->restart_fw)
- ieee80211_sched_scan_stopped(mvm->hw);
- break;
- }
+
+ /* Sched scan will be restarted by mac80211 in
+ * restart_hw, so do not report if FW is about to be
+ * restarted.
+ */
+ if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw)
+ ieee80211_sched_scan_stopped(mvm->hw);
+ }
+}
+
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
+{
+ int ret;
+
+ if (!(mvm->scan_status & type))
+ return 0;
+
+ if (iwl_mvm_is_radio_killed(mvm)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = iwl_mvm_scan_stop_wait(mvm, type);
+ if (!ret)
+ mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
+out:
+ /* Clear the scan status so the next scan requests will
+ * succeed and mark the scan as stopping, so that the Rx
+ * handler doesn't do anything, as the scan was stopped from
+ * above.
+ */
+ mvm->scan_status &= ~type;
+
+ if (type == IWL_MVM_SCAN_REGULAR) {
+ /* Since the rx handler won't do anything now, we have
+ * to release the scan reference here.
+ */
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ if (notify)
+ ieee80211_scan_completed(mvm->hw, true);
+ } else if (notify) {
+ ieee80211_sched_scan_stopped(mvm->hw);
}
+
+ return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 1845b79487c8..d68dc697a4a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -5,8 +5,8 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,8 +31,8 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1000,13 +1000,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+ iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
+ buf_size, ssn, wdg_timeout);
+
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
return -EIO;
- iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
- buf_size, ssn, wdg_timeout);
-
/*
* Even though in theory the peer could have different
* aggregation reorder buffer sizes for different sessions,
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index fd7b0d36f9a6..d24b6a83e68c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,12 +108,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* in the case that the time event actually completed in the firmware
* (which is handled in iwl_mvm_te_handle_notif).
*/
- if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
- if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+ }
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
queues |= BIT(mvm->aux_queue);
-
- iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
+ }
synchronize_net();
@@ -393,6 +395,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
te_data->running = true;
+ iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
ieee80211_ready_on_channel(mvm->hw); /* Start TE */
} else {
IWL_DEBUG_TE(mvm,
@@ -794,13 +797,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
{
- struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_vif *mvmvif = NULL;
struct iwl_mvm_time_event_data *te_data;
bool is_p2p = false;
lockdep_assert_held(&mvm->mutex);
- mvmvif = NULL;
spin_lock_bh(&mvm->time_event_lock);
/*
@@ -818,17 +820,14 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
}
}
- /*
- * Iterate over the list of aux roc time events and find the time
- * event that is associated with a BSS interface.
- * This assumes that a BSS interface can have only a single time
- * event at any given time and this time event corresponds to a ROC
- * request
+ /* There can only be at most one AUX ROC time event, we just use the
+ * list to simplify/unify code. Remove it if it exists.
*/
- list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
+ te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
+ struct iwl_mvm_time_event_data,
+ list);
+ if (te_data)
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
- goto remove_te;
- }
remove_te:
spin_unlock_bh(&mvm->time_event_lock);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index ba615ad2176c..80d07db6e7e8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -70,7 +70,7 @@
static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
- u32 duration = mvm->thermal_throttle.params->ct_kill_duration;
+ u32 duration = tt->params.ct_kill_duration;
if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
return;
@@ -223,7 +223,7 @@ static void check_exit_ctkill(struct work_struct *work)
tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
- duration = tt->params->ct_kill_duration;
+ duration = tt->params.ct_kill_duration;
mutex_lock(&mvm->mutex);
@@ -247,7 +247,7 @@ static void check_exit_ctkill(struct work_struct *work)
IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
- if (temp <= tt->params->ct_kill_exit) {
+ if (temp <= tt->params.ct_kill_exit) {
mutex_unlock(&mvm->mutex);
iwl_mvm_exit_ctkill(mvm);
return;
@@ -325,7 +325,7 @@ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
{
- const struct iwl_tt_params *params = mvm->thermal_throttle.params;
+ struct iwl_tt_params *params = &mvm->thermal_throttle.params;
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
s32 temperature = mvm->temperature;
bool throttle_enable = false;
@@ -340,7 +340,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
}
if (params->support_ct_kill &&
- temperature <= tt->params->ct_kill_exit) {
+ temperature <= params->ct_kill_exit) {
iwl_mvm_exit_ctkill(mvm);
return;
}
@@ -400,7 +400,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
}
}
-static const struct iwl_tt_params iwl7000_tt_params = {
+static const struct iwl_tt_params iwl_mvm_default_tt_params = {
.ct_kill_entry = 118,
.ct_kill_exit = 96,
.ct_kill_duration = 5,
@@ -422,38 +422,16 @@ static const struct iwl_tt_params iwl7000_tt_params = {
.support_tx_backoff = true,
};
-static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
- .ct_kill_entry = 118,
- .ct_kill_exit = 96,
- .ct_kill_duration = 5,
- .dynamic_smps_entry = 114,
- .dynamic_smps_exit = 110,
- .tx_protection_entry = 114,
- .tx_protection_exit = 108,
- .tx_backoff = {
- {.temperature = 112, .backoff = 300},
- {.temperature = 113, .backoff = 800},
- {.temperature = 114, .backoff = 1500},
- {.temperature = 115, .backoff = 3000},
- {.temperature = 116, .backoff = 5000},
- {.temperature = 117, .backoff = 10000},
- },
- .support_ct_kill = true,
- .support_dynamic_smps = true,
- .support_tx_protection = true,
- .support_tx_backoff = true,
-};
-
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
- if (mvm->cfg->high_temp)
- tt->params = &iwl7000_high_temp_tt_params;
+ if (mvm->cfg->thermal_params)
+ tt->params = *mvm->cfg->thermal_params;
else
- tt->params = &iwl7000_tt_params;
+ tt->params = iwl_mvm_default_tt_params;
tt->throttle = false;
tt->dynamic_smps = false;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index ef32e177f662..7ba7a118ff5c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,30 @@
#include "mvm.h"
#include "sta.h"
+static void
+iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
+ u16 tid, u16 ssn)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+ return;
+
+ if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
+ return;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+ "BAR sent to %pM, tid %d, ssn %d",
+ addr, tid, ssn);
+}
+
/*
* Sets most of the Tx cmd's fields
*/
@@ -101,12 +125,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
} else if (ieee80211_is_back_req(fc)) {
struct ieee80211_bar *bar = (void *)skb->data;
u16 control = le16_to_cpu(bar->control);
+ u16 ssn = le16_to_cpu(bar->start_seq_num);
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
tx_cmd->tid_tspec = (control &
IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
+ iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
+ ssn);
} else {
tx_cmd->tid_tspec = IWL_TID_NON_QOS;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
@@ -144,8 +171,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
!is_multicast_ether_addr(ieee80211_get_DA(hdr)))
tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
- if ((mvm->fw->ucode_capa.capa[0] &
- IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
ieee80211_action_contains_tpc(skb))
tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index bc55a8b82db6..03f8e06dded7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -584,7 +584,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
struct iwl_error_event_table table;
u32 base;
- if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) {
+ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
iwl_mvm_dump_nic_error_log_old(mvm);
return;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index b18569734922..2ed1e4d2774d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -629,7 +629,18 @@ static int iwl_pci_resume(struct device *device)
if (!trans->op_mode)
return 0;
- iwl_enable_rfkill_int(trans);
+ /*
+ * On suspend, ict is disabled, and the interrupt mask
+ * gets cleared. Reconfigure them both in case of d0i3
+ * image. Otherwise, only enable rfkill interrupt (in
+ * order to keep track of the rfkill status)
+ */
+ if (trans->wowlan_d0i3) {
+ iwl_pcie_reset_ict(trans);
+ iwl_enable_interrupts(trans);
+ } else {
+ iwl_enable_rfkill_int(trans);
+ }
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 376b84e54ad7..31f72a61cc3f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,6 +44,15 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
+#define RX_LOW_WATERMARK 8
+
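
The pool size falls straight out of the request arithmetic: a queue posts a request with only RX_POST_REQ_ALLOC empty RBDs attached while the allocator owes it RX_CLAIM_REQ_ALLOC pages, so the allocator needs the difference, per queue, as its own stock. A compile-time restatement of that bound:

    #include <assert.h>

    #define RX_NUM_QUEUES		1
    #define RX_POST_REQ_ALLOC	2
    #define RX_CLAIM_REQ_ALLOC	8
    #define RX_POOL_SIZE		((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * \
    				 RX_NUM_QUEUES)

    /* the allocator can owe at most (claim - post) pages per queue */
    static_assert(RX_POOL_SIZE == 6 * RX_NUM_QUEUES,
    	      "pool must cover the worst-case RBD gap");

    int main(void) { return 0; }
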
struct iwl_host_cmd;
/*This file includes the declaration that are internal to the
@@ -77,29 +86,29 @@ struct isr_statistics {
* struct iwl_rxq - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @pool:
- * @queue:
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to the allocator to use for allocation
* @write_actual:
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
* @need_update: flag to indicate we need to update read/write index
* @rb_stts: driver's pointer to receive buffer status
* @rb_stts_dma: bus address of receive buffer status
* @lock:
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
struct iwl_rxq {
__le32 *bd;
dma_addr_t bd_dma;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
u32 read;
u32 write;
u32 free_count;
+ u32 used_count;
u32 write_actual;
struct list_head rx_free;
struct list_head rx_used;
@@ -107,6 +116,32 @@ struct iwl_rxq {
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
+ struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+ struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @pool: initial pool of allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ * the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ * of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+ struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
+ atomic_t req_pending;
+ atomic_t req_ready;
+ struct list_head rbd_allocated;
+ struct list_head rbd_empty;
+ spinlock_t lock;
+ struct workqueue_struct *alloc_wq;
+ struct work_struct rx_alloc;
};
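
The two atomics implement a small producer/consumer ledger between the Rx queue and the background worker: the queue bumps req_pending when it hands over a batch of empty RBDs, the worker converts one unit of req_pending into req_ready per filled batch, and the queue claims by decrementing req_ready, backing off if the counter would go negative. A single-threaded user-space sketch of that protocol with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int req_pending, req_ready;

    static void queue_post_request(void)
    {
    	atomic_fetch_add(&req_pending, 1);	/* 8 empty RBDs handed over */
    }

    static void allocator_work(void)
    {
    	while (atomic_load(&req_pending)) {
    		/* ... allocate one batch of pages here ... */
    		atomic_fetch_sub(&req_pending, 1);
    		atomic_fetch_add(&req_ready, 1);
    	}
    }

    static int queue_claim(void)
    {
    	/* mirrors atomic_dec_return(&rba->req_ready) < 0 */
    	if (atomic_fetch_sub(&req_ready, 1) - 1 < 0) {
    		atomic_fetch_add(&req_ready, 1);	/* not ready yet */
    		return -1;
    	}
    	return 0;	/* a full batch may be taken from rbd_allocated */
    }

    int main(void)
    {
    	queue_post_request();
    	printf("claim before work: %d\n", queue_claim());	/* -1 */
    	allocator_work();
    	printf("claim after work:  %d\n", queue_claim());	/* 0 */
    	return 0;
    }
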
struct iwl_dma_ptr {
@@ -250,7 +285,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
- * @rx_replenish: work that will be called when buffers need to be allocated
+ * @rba: allocator for RX replenishing
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
@@ -273,7 +308,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
- struct work_struct rx_replenish;
+ struct iwl_rb_allocator rba;
struct iwl_trans *trans;
struct iwl_drv *drv;
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 7ff69c642103..a3fbaa0ef5e0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -74,16 +74,29 @@
* resets the Rx queue buffers with new memory.
*
* The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ * When the interrupt handler is called, the request is processed.
+ * The page is either stolen - transferred to the upper layer
+ * or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ * count, detaches the RBD and transfers it to the queue used list.
+ * When there are two used RBDs - they are transferred to the allocator empty
+ * list. Work is then scheduled for the allocator to start allocating
+ * eight buffers.
+ *   When another six RBDs are used - they are transferred to the allocator
+ *   empty list and the driver tries to claim the pre-allocated buffers and
+ *   add them to iwl->rxq->rx_free. If that fails - it keeps trying to claim
+ *   them until it succeeds.
+ * When there are 8+ buffers in the free list - either from allocation or from
+ * 8 reused unstolen pages - restock is called to update the FW and indexes.
+ * + In order to make sure the allocator always has RBDs to use for allocation
+ *   the allocator has an initial pool of size num_queues*(8-2) - the
+ *   maximum number of missing RBDs per allocation request (a request is
+ *   posted with 2 empty RBDs, and there is no guarantee when the other 6
+ *   RBDs are supplied). The queues supply the recycling of the remaining
+ *   RBDs.
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
- * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
* the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
* If there were enough free buffers and RX_STALLED is set it is cleared.
*
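
With the default constants the handshake described above works out as follows: after 2 pages are stolen the queue posts a request, and after 6 more it hands those over too and tries to claim the 8 freshly allocated buffers. A worked count, as a sketch:

    #include <stdio.h>

    #define RX_POST_REQ_ALLOC	2
    #define RX_CLAIM_REQ_ALLOC	8

    int main(void)
    {
    	int posted = 0;

    	for (int used = 1; used <= RX_CLAIM_REQ_ALLOC; used++) {
    		if (used == RX_POST_REQ_ALLOC) {
    			posted = 1;
    			printf("request posted with %d empty RBDs\n", used);
    		}
    		if (posted && used == RX_CLAIM_REQ_ALLOC)
    			printf("claiming %d buffers after %d used\n",
    			       RX_CLAIM_REQ_ALLOC, used);
    	}
    	return 0;
    }
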
@@ -92,18 +105,32 @@
*
* iwl_rxq_alloc() Allocates rx_free
* iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_pcie_rxq_restock
+ * iwl_pcie_rxq_restock.
+ * Used only during initialization.
* iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
* queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_pcie_rx_replenish
+ * the WRITE index.
+ * iwl_pcie_rx_allocator() Background work for allocating pages.
*
* -- enable interrupts --
* ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
* READ INDEX, detaching the SKB from the pool.
* Moves the packet buffer from queue to rx_used.
+ * Posts and claims requests to the allocator.
* Calls iwl_pcie_rxq_restock to refill any empty
* slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
* ...
*
*/
@@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
rxq->free_count--;
}
spin_unlock(&rxq->lock);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- schedule_work(&trans_pcie->rx_replenish);
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
@@ -255,6 +278,44 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
}
/*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ *
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct page *page;
+ gfp_t gfp_mask = GFP_KERNEL;
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (trans_pcie->rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+ trans_pcie->rx_page_order);
+ /* Issue an error if the hardware has consumed more than half
+ * of its free buffer list and we don't have enough
+ * pre-allocated buffers.
+		 */
+ if (rxq->free_count <= RX_LOW_WATERMARK &&
+ iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
+ net_ratelimit())
+ IWL_CRIT(trans,
+ "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
+ rxq->free_count);
+ return NULL;
+ }
+ return page;
+}
+
+/*
* iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
*
* A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -263,13 +324,12 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
- gfp_t gfp_mask = priority;
while (1) {
spin_lock(&rxq->lock);
@@ -279,32 +339,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
}
spin_unlock(&rxq->lock);
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (trans_pcie->rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
/* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(trans, "alloc_pages failed, "
- "order: %d\n",
- trans_pcie->rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(trans, "Failed to alloc_pages with %s."
- "Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ?
- "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
+ page = iwl_pcie_rx_alloc_page(trans);
+ if (!page)
return;
- }
spin_lock(&rxq->lock);
@@ -355,7 +393,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
lockdep_assert_held(&rxq->lock);
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ for (i = 0; i < RX_QUEUE_SIZE; i++) {
if (!rxq->pool[i].page)
continue;
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -372,32 +410,144 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 * When moving to rx_free a page is allocated for the slot.
*
* Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * This is called only during initialization
*/
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
- iwl_pcie_rxq_alloc_rbs(trans, gfp);
+ iwl_pcie_rxq_alloc_rbs(trans);
iwl_pcie_rxq_restock(trans);
}
-static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates 8 pages for each received request.
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+ while (atomic_read(&rba->req_pending)) {
+ int i;
+ struct list_head local_empty;
+ struct list_head local_allocated;
+
+ INIT_LIST_HEAD(&local_allocated);
+ spin_lock(&rba->lock);
+ /* swap out the entire rba->rbd_empty to a local list */
+ list_replace_init(&rba->rbd_empty, &local_empty);
+ spin_unlock(&rba->lock);
+
+ for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+
+ /* List should never be empty - each reused RBD is
+			 * returned to the list, and the initial pool covers
+			 * any possible gap between the time a page is
+			 * allocated and the time the RBD is added.
+ */
+ BUG_ON(list_empty(&local_empty));
+ /* Get the first rxb from the rbd list */
+ rxb = list_first_entry(&local_empty,
+ struct iwl_rx_mem_buffer, list);
+ BUG_ON(rxb->page);
+
+ /* Alloc a new receive buffer */
+ page = iwl_pcie_rx_alloc_page(trans);
+ if (!page)
+ continue;
+ rxb->page = page;
+
+ /* Get physical address of the RB */
+ rxb->page_dma = dma_map_page(trans->dev, page, 0,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+ rxb->page = NULL;
+ __free_pages(page, trans_pcie->rx_page_order);
+ continue;
+ }
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ /* move the allocated entry to the out list */
+ list_move(&rxb->list, &local_allocated);
+ i++;
+ }
+
+ spin_lock(&rba->lock);
+ /* add the allocated rbds to the allocator allocated list */
+ list_splice_tail(&local_allocated, &rba->rbd_allocated);
+ /* add the unused rbds back to the allocator empty list */
+ list_splice_tail(&local_empty, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+
+ atomic_dec(&rba->req_pending);
+ atomic_inc(&rba->req_ready);
+ }
+}
+
+/*
+ * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ *
+ * Called by the queue when it has posted an allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ */
+static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer
+ *out[RX_CLAIM_REQ_ALLOC])
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i;
+
+ if (atomic_dec_return(&rba->req_ready) < 0) {
+ atomic_inc(&rba->req_ready);
+ IWL_DEBUG_RX(trans,
+ "Allocation request not ready, pending requests = %d\n",
+ atomic_read(&rba->req_pending));
+ return -ENOMEM;
+ }
+
+ spin_lock(&rba->lock);
+ for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+ /* Get next free Rx buffer, remove it from free list */
+ out[i] = list_first_entry(&rba->rbd_allocated,
+ struct iwl_rx_mem_buffer, list);
+ list_del(&out[i]->list);
+ }
+ spin_unlock(&rba->lock);
+
+ return 0;
+}
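A minimal user-space sketch of the req_pending/req_ready handshake implemented above, using C11 atomics in place of the kernel's atomic_t; RX_CLAIM_REQ_ALLOC = 8 is an assumption taken from the comments in this patch, and the list handling is elided:

    /* Sketch only: models the queue/allocator counters, not the RBD lists. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define RX_CLAIM_REQ_ALLOC 8            /* assumed batch size */

    static atomic_int req_pending;          /* posted by the RX queue */
    static atomic_int req_ready;            /* completed by the allocator */

    static void allocator_run(void)
    {
            /* allocator: turn each pending request into a ready one */
            while (atomic_load(&req_pending)) {
                    /* ... allocate RX_CLAIM_REQ_ALLOC pages here ... */
                    atomic_fetch_sub(&req_pending, 1);
                    atomic_fetch_add(&req_ready, 1);
            }
    }

    static int queue_claim(void)
    {
            /* queue: claim one completed request, back off if none ready */
            if (atomic_fetch_sub(&req_ready, 1) - 1 < 0) {
                    atomic_fetch_add(&req_ready, 1);
                    return -1;      /* mirrors the -ENOMEM path above */
            }
            /* ... move RX_CLAIM_REQ_ALLOC buffers to rx_free here ... */
            return 0;
    }

    int main(void)
    {
            atomic_fetch_add(&req_pending, 1);      /* queue posts a request */
            allocator_run();
            printf("claim: %d\n", queue_claim());   /* prints "claim: 0" */
            return 0;
    }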
+
+static void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
+ struct iwl_rb_allocator *rba_p =
+ container_of(data, struct iwl_rb_allocator, rx_alloc);
struct iwl_trans_pcie *trans_pcie =
- container_of(data, struct iwl_trans_pcie, rx_replenish);
+ container_of(rba_p, struct iwl_trans_pcie, rba);
- iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+ iwl_pcie_rx_allocator(trans_pcie->trans);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct device *dev = trans->dev;
memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
spin_lock_init(&rxq->lock);
+ spin_lock_init(&rba->lock);
if (WARN_ON(rxq->bd || rxq->rb_stts))
return -EINVAL;
@@ -487,15 +637,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
rxq->free_count = 0;
+ rxq->used_count = 0;
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
list_add(&rxq->pool[i].list, &rxq->rx_used);
}
+static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
+{
+ int i;
+
+ lockdep_assert_held(&rba->lock);
+
+ INIT_LIST_HEAD(&rba->rbd_allocated);
+ INIT_LIST_HEAD(&rba->rbd_empty);
+
+ for (i = 0; i < RX_POOL_SIZE; i++)
+ list_add(&rba->pool[i].list, &rba->rbd_empty);
+}
+
+static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int i;
+
+ lockdep_assert_held(&rba->lock);
+
+ for (i = 0; i < RX_POOL_SIZE; i++) {
+ if (!rba->pool[i].page)
+ continue;
+ dma_unmap_page(trans->dev, rba->pool[i].page_dma,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
+ __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
+ rba->pool[i].page = NULL;
+ }
+}
+
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, err;
if (!rxq->bd) {
@@ -503,11 +687,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
if (err)
return err;
}
+ if (!rba->alloc_wq)
+ rba->alloc_wq = alloc_workqueue("rb_allocator",
+ WQ_HIGHPRI | WQ_UNBOUND, 1);
+ INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+
+ spin_lock(&rba->lock);
+ atomic_set(&rba->req_pending, 0);
+ atomic_set(&rba->req_ready, 0);
+ /* free all first - we might be reconfigured for a different size */
+ iwl_pcie_rx_free_rba(trans);
+ iwl_pcie_rx_init_rba(rba);
+ spin_unlock(&rba->lock);
spin_lock(&rxq->lock);
- INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
-
/* free all first - we might be reconfigured for a different size */
iwl_pcie_rxq_free_rbs(trans);
iwl_pcie_rx_init_rxb_lists(rxq);
@@ -522,7 +716,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+ iwl_pcie_rx_replenish(trans);
iwl_pcie_rx_hw_init(trans, rxq);
@@ -537,6 +731,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
/*if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
@@ -545,7 +740,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
return;
}
- cancel_work_sync(&trans_pcie->rx_replenish);
+ cancel_work_sync(&rba->rx_alloc);
+ if (rba->alloc_wq) {
+ destroy_workqueue(rba->alloc_wq);
+ rba->alloc_wq = NULL;
+ }
+
+ spin_lock(&rba->lock);
+ iwl_pcie_rx_free_rba(trans);
+ spin_unlock(&rba->lock);
spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
@@ -566,6 +769,43 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
rxq->rb_stts = NULL;
}
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when an RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted.
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+ /* Count the used RBDs */
+ rxq->used_count++;
+
+ /* Move the RBD to the used list; it will be moved to the allocator
+ * in batches before claiming or posting a request */
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
+ * issue a request for the allocator. The modulo of
+ * RX_CLAIM_REQ_ALLOC covers the case where we failed to claim
+ * RX_CLAIM_REQ_ALLOC buffers but still need to post another
+ * request.
+ */
+ if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+ /* Move the 2 RBDs to the allocator's ownership. The allocator
+ * has another 6 from the pool for the request completion */
+ spin_lock(&rba->lock);
+ list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+
+ atomic_inc(&rba->req_pending);
+ queue_work(rba->alloc_wq, &rba->rx_alloc);
+ }
+}
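To see when the condition above fires, assume RX_CLAIM_REQ_ALLOC = 8 and RX_POST_REQ_ALLOC = 2 (the counts the comments in this patch imply); a request is then posted each time used_count crosses 2 modulo 8, i.e. at 2, 10, 18, and so on. A quick check:

    /* Sketch: assumed values RX_CLAIM_REQ_ALLOC = 8, RX_POST_REQ_ALLOC = 2. */
    #include <stdio.h>

    #define RX_CLAIM_REQ_ALLOC 8
    #define RX_POST_REQ_ALLOC  2

    int main(void)
    {
            unsigned used_count;

            for (used_count = 1; used_count <= 20; used_count++)
                    if ((used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC)
                            printf("post request at used_count=%u\n",
                                   used_count);
            /* prints 2, 10 and 18 - one request per 8 reused RBDs */
            return 0;
    }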
+
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb)
{
@@ -688,13 +928,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
*/
__free_pages(rxb->page, trans_pcie->rx_page_order);
rxb->page = NULL;
- list_add_tail(&rxb->list, &rxq->rx_used);
+ iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
} else {
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
} else
- list_add_tail(&rxb->list, &rxq->rx_used);
+ iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
}
/*
@@ -704,10 +944,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- u32 r, i;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty;
+ u32 r, i, j;
restart:
spin_lock(&rxq->lock);
@@ -720,14 +957,6 @@ restart:
if (i == r)
IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
- /* calculate total frames need to be restock after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
-
while (i != r) {
struct iwl_rx_mem_buffer *rxb;
@@ -739,29 +968,48 @@ restart:
iwl_pcie_rx_handle_rb(trans, rxb);
i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
- * restock the Rx queue so ucode wont assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- spin_unlock(&rxq->lock);
- iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
- count = 0;
- goto restart;
+
+ /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+ * try to claim the pre-allocated buffers from the allocator */
+ if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+ struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
+
+ /* Add the remaining 6 empty RBDs for allocator use */
+ spin_lock(&rba->lock);
+ list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+
+ /* If not ready - continue; we will try to reclaim later.
+ * No need to reschedule the work - the allocator exits
+ * only on success */
+ if (!iwl_pcie_rx_allocator_get(trans, out)) {
+ /* On success, RX_CLAIM_REQ_ALLOC
+ * buffers were retrieved and should be added
+ * to the free list */
+ rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+ for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
+ list_add_tail(&out[j]->list,
+ &rxq->rx_free);
+ rxq->free_count++;
+ }
}
}
+ /* handle restock for two cases:
+ * - we just pulled buffers from the allocator
+ * - we have 8+ unstolen pages accumulated */
+ if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
+ rxq->read = i;
+ spin_unlock(&rxq->lock);
+ iwl_pcie_rxq_restock(trans);
+ goto restart;
+ }
}
/* Backtrack one entry */
rxq->read = i;
spin_unlock(&rxq->lock);
- if (fill_rx)
- iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
- else
- iwl_pcie_rxq_restock(trans);
-
if (trans_pcie->napi.poll)
napi_gro_flush(&trans_pcie->napi, false);
}
@@ -775,6 +1023,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
if (trans->cfg->internal_wimax_coex &&
+ !trans->cfg->apmg_not_supported &&
(!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dc179094e6a0..43ae658af6ec 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -101,14 +101,26 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
trans_pcie->fw_mon_size = 0;
}
-static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
+static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct page *page = NULL;
dma_addr_t phys;
- u32 size;
+ u32 size = 0;
u8 power;
+ if (!max_power) {
+ /* default max_power is maximum */
+ max_power = 26;
+ } else {
+ max_power += 11;
+ }
+
+ if (WARN(max_power > 26,
+ "External buffer size for monitor is too big %d, check the FW TLV\n",
+ max_power))
+ return;
+
if (trans_pcie->fw_mon_page) {
dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
trans_pcie->fw_mon_size,
@@ -117,7 +129,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
}
phys = 0;
- for (power = 26; power >= 11; power--) {
+ for (power = max_power; power >= 11; power--) {
int order;
size = BIT(power);
@@ -143,6 +155,12 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
if (WARN_ON_ONCE(!page))
return;
+ if (power != max_power)
+ IWL_ERR(trans,
+ "Sorry - debug buffer is only %luK while you requested %luK\n",
+ (unsigned long)BIT(power - 10),
+ (unsigned long)BIT(max_power - 10));
+
trans_pcie->fw_mon_page = page;
trans_pcie->fw_mon_phys = phys;
trans_pcie->fw_mon_size = size;
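The arithmetic above maps the firmware TLV value to a power-of-two buffer size: 0 selects the 2^26-byte (64 MB) maximum, any other value v selects 2^(v+11) bytes, and the allocation loop falls back to smaller powers on failure. A stand-alone sketch of the mapping (fw_mon_bytes() is a made-up helper for illustration):

    /* Illustrative only - mirrors the max_power mapping in
     * iwl_pcie_alloc_fw_monitor(); fw_mon_bytes() is a made-up helper. */
    #include <stdio.h>

    static unsigned long fw_mon_bytes(unsigned char tlv_power)
    {
            unsigned char max_power = tlv_power ? tlv_power + 11 : 26;

            if (max_power > 26)     /* mirrors the WARN in the driver */
                    return 0;
            return 1UL << max_power;
    }

    int main(void)
    {
            printf("%lu\n", fw_mon_bytes(0));   /* 67108864 - 64 MB default */
            printf("%lu\n", fw_mon_bytes(1));   /* 4096     - 2^(1+11)      */
            printf("%lu\n", fw_mon_bytes(15));  /* 67108864 - 2^26 maximum  */
            return 0;
    }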
@@ -164,6 +182,9 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
+ if (!trans->cfg->apmg_not_supported)
+ return;
+
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
@@ -297,7 +318,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
* bits do not disable clocks. This preserves any hardware
* bits already set by default in "CLK_CTRL_REG" after reset.
*/
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (!trans->cfg->apmg_not_supported) {
iwl_write_prph(trans, APMG_CLK_EN_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(20);
@@ -497,8 +518,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
- iwl_pcie_set_pwr(trans, false);
+ iwl_pcie_set_pwr(trans, false);
iwl_op_mode_nic_config(trans->op_mode);
@@ -834,7 +854,7 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
get_fw_dbg_mode_string(dest->monitor_mode));
if (dest->monitor_mode == EXTERNAL_MODE)
- iwl_pcie_alloc_fw_monitor(trans);
+ iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
else
IWL_WARN(trans, "PCI should have external buffer debug\n");
@@ -908,7 +928,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* supported for 7000 only for the moment */
if (iwlwifi_mod_params.fw_monitor &&
trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
- iwl_pcie_alloc_fw_monitor(trans);
+ iwl_pcie_alloc_fw_monitor(trans, 0);
if (trans_pcie->fw_mon_size) {
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
@@ -955,12 +975,8 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
return ret;
/* load to FW the binary sections of CPU2 */
- ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2,
- &first_ucode_section);
- if (ret)
- return ret;
-
- return 0;
+ return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
+ &first_ucode_section);
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
@@ -1049,7 +1065,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
iwl_pcie_rx_stop(trans);
/* Power-down device's busmaster DMA clocks */
- if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ if (!trans->cfg->apmg_not_supported) {
iwl_write_prph(trans, APMG_CLK_DIS_REG,
APMG_CLK_VAL_DMA_CLK_RQT);
udelay(5);
@@ -1346,14 +1362,13 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iounmap(trans_pcie->hw_base);
pci_release_regions(trans_pcie->pci_dev);
pci_disable_device(trans_pcie->pci_dev);
- kmem_cache_destroy(trans->dev_cmd_pool);
if (trans_pcie->napi.poll)
netif_napi_del(&trans_pcie->napi);
iwl_pcie_free_fw_monitor(trans);
- kfree(trans);
+ iwl_trans_free(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
@@ -2200,6 +2215,29 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
return sizeof(**data) + fh_regs_len;
}
+static u32
+iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_fw_mon *fw_mon_data,
+ u32 monitor_len)
+{
+ u32 buf_size_in_dwords = (monitor_len >> 2);
+ u32 *buffer = (u32 *)fw_mon_data->data;
+ unsigned long flags;
+ u32 i;
+
+ if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ return 0;
+
+ __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
+ for (i = 0; i < buf_size_in_dwords; i++)
+ buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR);
+ __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
+
+ iwl_trans_release_nic_access(trans, &flags);
+
+ return monitor_len;
+}
+
static
struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
{
@@ -2252,7 +2290,8 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
trans->dbg_dest_tlv->end_shift;
/* Make "end" point to the actual end */
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
+ trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
end += (1 << trans->dbg_dest_tlv->end_shift);
monitor_len = end - base;
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
@@ -2328,9 +2367,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
len += sizeof(*data) + sizeof(*fw_mon_data);
if (trans_pcie->fw_mon_page) {
- data->len = cpu_to_le32(trans_pcie->fw_mon_size +
- sizeof(*fw_mon_data));
-
/*
* The firmware is now asserted, it won't write anything
* to the buffer. CPU can take ownership to fetch the
@@ -2345,10 +2381,8 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
page_address(trans_pcie->fw_mon_page),
trans_pcie->fw_mon_size);
- len += trans_pcie->fw_mon_size;
- } else {
- /* If we are here then the buffer is internal */
-
+ monitor_len = trans_pcie->fw_mon_size;
+ } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
/*
* Update pointers to reflect actual values after
* shifting
@@ -2357,10 +2391,18 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
trans->dbg_dest_tlv->base_shift;
iwl_trans_read_mem(trans, base, fw_mon_data->data,
monitor_len / sizeof(u32));
- data->len = cpu_to_le32(sizeof(*fw_mon_data) +
- monitor_len);
- len += monitor_len;
+ } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+ monitor_len =
+ iwl_trans_pci_dump_marbh_monitor(trans,
+ fw_mon_data,
+ monitor_len);
+ } else {
+ /* Didn't match anything - output no monitor data */
+ monitor_len = 0;
}
+
+ len += monitor_len;
+ data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
}
dump_data->len = len;
@@ -2419,18 +2461,13 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
u16 pci_cmd;
int err;
- trans = kzalloc(sizeof(struct iwl_trans) +
- sizeof(struct iwl_trans_pcie), GFP_KERNEL);
- if (!trans) {
- err = -ENOMEM;
- goto out;
- }
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+ &pdev->dev, cfg, &trans_ops_pcie, 0);
+ if (!trans)
+ return ERR_PTR(-ENOMEM);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- trans->ops = &trans_ops_pcie;
- trans->cfg = cfg;
- trans_lockdep_init(trans);
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
@@ -2554,25 +2591,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
- snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
- "iwl_cmd_pool:%s", dev_name(trans->dev));
-
- trans->dev_cmd_headroom = 0;
- trans->dev_cmd_pool =
- kmem_cache_create(trans->dev_cmd_pool_name,
- sizeof(struct iwl_device_cmd)
- + trans->dev_cmd_headroom,
- sizeof(void *),
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!trans->dev_cmd_pool) {
- err = -ENOMEM;
- goto out_pci_disable_msi;
- }
-
if (iwl_pcie_alloc_ict(trans))
- goto out_free_cmd_pool;
+ goto out_pci_disable_msi;
err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
iwl_pcie_irq_handler,
@@ -2589,8 +2609,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
out_free_ict:
iwl_pcie_free_ict(trans);
-out_free_cmd_pool:
- kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
pci_disable_msi(pdev);
out_pci_release_regions:
@@ -2598,7 +2616,6 @@ out_pci_release_regions:
out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
- kfree(trans);
-out:
+ iwl_trans_free(trans);
return ERR_PTR(err);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 5ef8044c2ea3..2b86c2135de3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1049,8 +1049,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
!trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
- udelay(2);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 1a4d558022d8..8317afd065b4 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -835,14 +835,13 @@ static int lbs_cfg_scan(struct wiphy *wiphy,
* Events
*/
-void lbs_send_disconnect_notification(struct lbs_private *priv)
+void lbs_send_disconnect_notification(struct lbs_private *priv,
+ bool locally_generated)
{
lbs_deb_enter(LBS_DEB_CFG80211);
- cfg80211_disconnected(priv->dev,
- 0,
- NULL, 0,
- GFP_KERNEL);
+ cfg80211_disconnected(priv->dev, 0, NULL, 0, locally_generated,
+ GFP_KERNEL);
lbs_deb_leave(LBS_DEB_CFG80211);
}
@@ -1458,7 +1457,7 @@ int lbs_disconnect(struct lbs_private *priv, u16 reason)
cfg80211_disconnected(priv->dev,
reason,
- NULL, 0,
+ NULL, 0, true,
GFP_KERNEL);
priv->connect_status = LBS_DISCONNECTED;
@@ -2031,7 +2030,7 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
/* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, true);
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
index 10995f59fe34..acccc2922401 100644
--- a/drivers/net/wireless/libertas/cfg.h
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -10,7 +10,8 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev);
int lbs_cfg_register(struct lbs_private *priv);
void lbs_cfg_free(struct lbs_private *priv);
-void lbs_send_disconnect_notification(struct lbs_private *priv);
+void lbs_send_disconnect_notification(struct lbs_private *priv,
+ bool locally_generated);
void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
void lbs_scan_done(struct lbs_private *priv);
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 4279e8ab95f2..0c5444b02c64 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -68,7 +68,8 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
/* From cmdresp.c */
-void lbs_mac_event_disconnected(struct lbs_private *priv);
+void lbs_mac_event_disconnected(struct lbs_private *priv,
+ bool locally_generated);
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 65f18f1e869c..e5442e8956f7 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -19,10 +19,13 @@
* reset link state etc.
*
* @priv: A pointer to struct lbs_private structure
+ * @locally_generated: indicates disconnect was requested locally
+ * (usually by userspace)
*
* returns: n/a
*/
-void lbs_mac_event_disconnected(struct lbs_private *priv)
+void lbs_mac_event_disconnected(struct lbs_private *priv,
+ bool locally_generated)
{
if (priv->connect_status != LBS_CONNECTED)
return;
@@ -36,7 +39,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
msleep_interruptible(1000);
if (priv->wdev->iftype == NL80211_IFTYPE_STATION)
- lbs_send_disconnect_notification(priv);
+ lbs_send_disconnect_notification(priv, locally_generated);
/* report disconnect to upper layer */
netif_stop_queue(priv->dev);
@@ -229,17 +232,17 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
case MACREG_INT_CODE_DEAUTHENTICATED:
lbs_deb_cmd("EVENT: deauthenticated\n");
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, false);
break;
case MACREG_INT_CODE_DISASSOCIATED:
lbs_deb_cmd("EVENT: disassociated\n");
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, false);
break;
case MACREG_INT_CODE_LINK_LOST_NO_SCAN:
lbs_deb_cmd("EVENT: link lost\n");
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, true);
break;
case MACREG_INT_CODE_PS_SLEEP:
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index ed02e4bf2c26..a47f0acc099a 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -439,7 +439,7 @@ static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw,
return mc_count;
}
-#define SUPPORTED_FIF_FLAGS (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)
+#define SUPPORTED_FIF_FLAGS FIF_ALLMULTI
static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
@@ -458,10 +458,7 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
return;
}
- if (*new_flags & (FIF_PROMISC_IN_BSS))
- priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
- else
- priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
+ priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
if (*new_flags & (FIF_ALLMULTI) ||
multicast > MRVDRV_MAX_MULTICAST_LIST_SIZE) {
priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
@@ -637,7 +634,7 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
priv->tx_skb = NULL;
hw->queues = 1;
- hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
hw->extra_tx_headroom = sizeof(struct txpd);
memcpy(priv->channels, lbtf_channels, sizeof(lbtf_channels));
memcpy(priv->rates, lbtf_rates, sizeof(lbtf_rates));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index d5c0a1af08b9..99e873dc8684 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1286,7 +1286,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
if (control->sta)
hwsim_check_sta_magic(control->sta);
- if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
+ if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
ieee80211_get_tx_rates(txi->control.vif, control->sta, skb,
txi->control.rates,
ARRAY_SIZE(txi->control.rates));
@@ -1395,7 +1395,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
{
u32 _pid = ACCESS_ONCE(wmediumd_portid);
- if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE) {
+ if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
ieee80211_get_tx_rates(txi->control.vif, NULL, skb,
txi->control.rates,
@@ -1432,7 +1432,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
if (skb == NULL)
return;
info = IEEE80211_SKB_CB(skb);
- if (hw->flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
+ if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
ieee80211_get_tx_rates(vif, NULL, skb,
info->control.rates,
ARRAY_SIZE(info->control.rates));
@@ -1554,8 +1554,6 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
wiphy_debug(hw->wiphy, "%s\n", __func__);
data->rx_filter = 0;
- if (*total_flags & FIF_PROMISC_IN_BSS)
- data->rx_filter |= FIF_PROMISC_IN_BSS;
if (*total_flags & FIF_ALLMULTI)
data->rx_filter |= FIF_ALLMULTI;
@@ -2393,15 +2391,16 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
if (param->p2p_device)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
- hw->flags = IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_WANT_MONITOR_VIF |
- IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
- IEEE80211_HW_CHANCTX_STA_CSA;
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, QUEUE_CONTROL);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
if (rctbl)
- hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
@@ -2438,6 +2437,31 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
sband->bitrates = data->rates + 4;
sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
+
+ sband->vht_cap.vht_supported = true;
+ sband->vht_cap.cap =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+ IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_TXSTBC |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_RXSTBC_2 |
+ IEEE80211_VHT_CAP_RXSTBC_3 |
+ IEEE80211_VHT_CAP_RXSTBC_4 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+ sband->vht_cap.vht_mcs.rx_mcs_map =
+ cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 14);
+ sband->vht_cap.vht_mcs.tx_mcs_map =
+ sband->vht_cap.vht_mcs.rx_mcs_map;
break;
default:
continue;
@@ -2458,31 +2482,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
hw->wiphy->bands[band] = sband;
-
- sband->vht_cap.vht_supported = true;
- sband->vht_cap.cap =
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
- IEEE80211_VHT_CAP_RXLDPC |
- IEEE80211_VHT_CAP_SHORT_GI_80 |
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_TXSTBC |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
- IEEE80211_VHT_CAP_RXSTBC_4 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
- sband->vht_cap.vht_mcs.rx_mcs_map =
- cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
- IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
- sband->vht_cap.vht_mcs.tx_mcs_map =
- sband->vht_cap.vht_mcs.rx_mcs_map;
}
/* By default all radios belong to the first group */
@@ -2510,7 +2509,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
}
if (param->no_vif)
- hw->flags |= IEEE80211_HW_NO_AUTO_VIF;
+ ieee80211_hw_set(hw, NO_AUTO_VIF);
err = ieee80211_register_hw(hw);
if (err < 0) {
diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig
new file mode 100644
index 000000000000..cba300c6b5da
--- /dev/null
+++ b/drivers/net/wireless/mediatek/Kconfig
@@ -0,0 +1,10 @@
+menuconfig WL_MEDIATEK
+ bool "Mediatek Wireless LAN support"
+ ---help---
+ Enable community drivers for MediaTek WiFi devices.
+ Those drivers make use of the Linux mac80211 stack.
+
+
+if WL_MEDIATEK
+source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
+endif # WL_MEDIATEK
diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile
new file mode 100644
index 000000000000..9d5f182fd7fd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MT7601U) += mt7601u/
diff --git a/drivers/net/wireless/mediatek/mt7601u/Kconfig b/drivers/net/wireless/mediatek/mt7601u/Kconfig
new file mode 100644
index 000000000000..f46bed92796b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/Kconfig
@@ -0,0 +1,6 @@
+config MT7601U
+ tristate "MediaTek MT7601U (USB) support"
+ depends on MAC80211
+ depends on USB
+ ---help---
+ This adds support for MT7601U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt7601u/Makefile b/drivers/net/wireless/mediatek/mt7601u/Makefile
new file mode 100644
index 000000000000..ea9ed8a5db4d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -D__CHECK_ENDIAN__
+
+obj-$(CONFIG_MT7601U) += mt7601u.o
+
+mt7601u-objs = \
+ usb.o init.o main.o mcu.o trace.o dma.o core.o eeprom.o phy.o \
+ mac.o util.o debugfs.o tx.o
+
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt7601u/core.c b/drivers/net/wireless/mediatek/mt7601u/core.c
new file mode 100644
index 000000000000..0aabd790f985
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/core.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev)
+{
+ int i = 100;
+ u32 val;
+
+ do {
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return -EIO;
+
+ val = mt7601u_rr(dev, MT_MAC_CSR0);
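+ /* 0 means the ASIC is not ready yet; all-ones usually means the
+ * bus read failed because the device is gone */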
+ if (val && ~val)
+ return 0;
+
+ udelay(10);
+ } while (i--);
+
+ return -EIO;
+}
+
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return false;
+
+ cur = mt7601u_rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ udelay(10);
+ } while (timeout-- > 0);
+
+ dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+ return false;
+}
+
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout)
+{
+ u32 cur;
+
+ timeout /= 10;
+ do {
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return false;
+
+ cur = mt7601u_rr(dev, offset) & mask;
+ if (cur == val)
+ return true;
+
+ msleep(10);
+ } while (timeout-- > 0);
+
+ dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+ return false;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
new file mode 100644
index 000000000000..fc008475a03b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mt7601u.h"
+#include "eeprom.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+ struct mt7601u_dev *dev = data;
+
+ mt76_wr(dev, dev->debugfs_reg, val);
+ return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+ struct mt7601u_dev *dev = data;
+
+ *val = mt76_rr(dev, dev->debugfs_reg);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
+{
+ struct mt7601u_dev *dev = file->private;
+ int i, j;
+
+#define stat_printf(grp, off, name) \
+ seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
+
+ stat_printf(rx_stat, 0, rx_crc_err);
+ stat_printf(rx_stat, 1, rx_phy_err);
+ stat_printf(rx_stat, 2, rx_false_cca);
+ stat_printf(rx_stat, 3, rx_plcp_err);
+ stat_printf(rx_stat, 4, rx_fifo_overflow);
+ stat_printf(rx_stat, 5, rx_duplicate);
+
+ stat_printf(tx_stat, 0, tx_fail_cnt);
+ stat_printf(tx_stat, 1, tx_bcn_cnt);
+ stat_printf(tx_stat, 2, tx_success);
+ stat_printf(tx_stat, 3, tx_retransmit);
+ stat_printf(tx_stat, 4, tx_zero_len);
+ stat_printf(tx_stat, 5, tx_underflow);
+
+ stat_printf(aggr_stat, 0, non_aggr_tx);
+ stat_printf(aggr_stat, 1, aggr_tx);
+
+ stat_printf(zero_len_del, 0, tx_zero_len_del);
+ stat_printf(zero_len_del, 1, rx_zero_len_del);
+#undef stat_printf
+
+ seq_puts(file, "Aggregations stats:\n");
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++)
+ seq_printf(file, "%08llx ",
+ dev->stats.aggr_n[i * 8 + j]);
+ seq_putc(file, '\n');
+ }
+
+ seq_printf(file, "recent average AMPDU len: %d\n",
+ atomic_read(&dev->avg_ampdu_len));
+
+ return 0;
+}
+
+static int
+mt7601u_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7601u_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+ .open = mt7601u_ampdu_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
+mt7601u_eeprom_param_read(struct seq_file *file, void *data)
+{
+ struct mt7601u_dev *dev = file->private;
+ struct mt7601u_rate_power *rp = &dev->ee->power_rate_table;
+ struct tssi_data *td = &dev->ee->tssi_data;
+ int i;
+
+ seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
+ seq_printf(file, "RSSI offset: %hhx %hhx\n",
+ dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]);
+ seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp);
+ seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain);
+ seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+ dev->ee->reg.start + dev->ee->reg.num - 1);
+
+ seq_puts(file, "Per rate power:\n");
+ for (i = 0; i < 2; i++)
+ seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+ rp->cck[i].raw, rp->cck[i].bw20, rp->cck[i].bw40);
+ for (i = 0; i < 4; i++)
+ seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+ rp->ofdm[i].raw, rp->ofdm[i].bw20, rp->ofdm[i].bw40);
+ for (i = 0; i < 4; i++)
+ seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+ rp->ht[i].raw, rp->ht[i].bw20, rp->ht[i].bw40);
+
+ seq_puts(file, "Per channel power:\n");
+ for (i = 0; i < 7; i++)
+ seq_printf(file, "\t tx_power ch%u:%02hhx ch%u:%02hhx\n",
+ i * 2 + 1, dev->ee->chan_pwr[i * 2],
+ i * 2 + 2, dev->ee->chan_pwr[i * 2 + 1]);
+
+ if (!dev->ee->tssi_enabled)
+ return 0;
+
+ seq_puts(file, "TSSI:\n");
+ seq_printf(file, "\t slope:%02hhx\n", td->slope);
+ seq_printf(file, "\t offset=%02hhx %02hhx %02hhx\n",
+ td->offset[0], td->offset[1], td->offset[2]);
+ seq_printf(file, "\t delta_off:%08x\n", td->tx0_delta_offset);
+
+ return 0;
+}
+
+static int
+mt7601u_eeprom_param_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, mt7601u_eeprom_param_read, inode->i_private);
+}
+
+static const struct file_operations fops_eeprom_param = {
+ .open = mt7601u_eeprom_param_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("mt7601u", dev->hw->wiphy->debugfsdir);
+ if (!dir)
+ return;
+
+ debugfs_create_u8("temperature", S_IRUSR, dir, &dev->raw_temp);
+ debugfs_create_u32("temp_mode", S_IRUSR, dir, &dev->temp_mode);
+
+ debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+ debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
+ &fops_regval);
+ debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
+ &fops_eeprom_param);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
new file mode 100644
index 000000000000..9c9e1288644b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -0,0 +1,533 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "usb.h"
+#include "trace.h"
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+ struct mt7601u_dma_buf_rx *e, gfp_t gfp);
+
+static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
+{
+ const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
+ unsigned int hdrlen;
+
+ if (unlikely(len < 10))
+ return 0;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (unlikely(hdrlen > len))
+ return 0;
+ return hdrlen;
+}
+
+static struct sk_buff *
+mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+ u8 *data, u32 seg_len)
+{
+ struct sk_buff *skb;
+ u32 true_len;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD))
+ seg_len -= 2;
+
+ skb = alloc_skb(seg_len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+ int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
+
+ memcpy(skb_put(skb, hdr_len), data, hdr_len);
+ data += hdr_len + 2;
+ seg_len -= hdr_len;
+ }
+
+ memcpy(skb_put(skb, seg_len), data, seg_len);
+
+ true_len = mt76_mac_process_rx(dev, skb, skb->data, rxwi);
+ skb_trim(skb, true_len);
+
+ return skb;
+}
+
+static struct sk_buff *
+mt7601u_rx_skb_from_seg_paged(struct mt7601u_dev *dev,
+ struct mt7601u_rxwi *rxwi, void *data,
+ u32 seg_len, u32 truesize, struct page *p)
+{
+ unsigned int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
+ unsigned int true_len, copy, frag;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+ memcpy(skb_put(skb, hdr_len), data, hdr_len);
+ data += hdr_len + 2;
+ true_len -= hdr_len;
+ hdr_len = 0;
+ }
+
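+ /* Copy the whole frame if it fits in the skb's tailroom; otherwise
+ * copy only the remaining header plus 8 bytes and attach the rest
+ * of the payload as a page fragment below */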
+ copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
+ frag = true_len - copy;
+
+ memcpy(skb_put(skb, copy), data, copy);
+ data += copy;
+
+ if (frag) {
+ skb_add_rx_frag(skb, 0, p, data - page_address(p),
+ frag, truesize);
+ get_page(p);
+ }
+
+ return skb;
+}
+
+static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
+ u32 seg_len, struct page *p, bool paged)
+{
+ struct sk_buff *skb;
+ struct mt7601u_rxwi *rxwi;
+ u32 fce_info, truesize = seg_len;
+
+ /* The DMA_INFO field at the beginning of the segment contains only some
+ * of the information; we need to read the FCE descriptor from the end.
+ */
+ fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
+ seg_len -= MT_FCE_INFO_LEN;
+
+ data += MT_DMA_HDR_LEN;
+ seg_len -= MT_DMA_HDR_LEN;
+
+ rxwi = (struct mt7601u_rxwi *) data;
+ data += sizeof(struct mt7601u_rxwi);
+ seg_len -= sizeof(struct mt7601u_rxwi);
+
+ if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
+ dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
+ if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
+ dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");
+
+ trace_mt_rx(dev, rxwi, fce_info);
+
+ if (paged)
+ skb = mt7601u_rx_skb_from_seg_paged(dev, rxwi, data, seg_len,
+ truesize, p);
+ else
+ skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len);
+ if (!skb)
+ return;
+
+ ieee80211_rx_ni(dev->hw, skb);
+}
+
+static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
+{
+ u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
+ sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
+ u16 dma_len = get_unaligned_le16(data);
+
+ if (data_len < min_seg_len ||
+ WARN_ON(!dma_len) ||
+ WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
+ WARN_ON(dma_len & 0x3))
+ return 0;
+
+ return MT_DMA_HDRS + dma_len;
+}
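As a worked example of the length check above (segment sizes invented for illustration): an aggregate URB carrying three frames with dma_len 1024, 256 and 64 has an actual_length of 3 * MT_DMA_HDRS + 1344 = 1368 bytes (MT_DMA_HDRS is the 8 bytes of per-segment headers defined in dma.h later in this patch), and the parser advances by MT_DMA_HDRS + dma_len per segment:

    /* Sketch with invented segment lengths; MT_DMA_HDRS = 8 per dma.h. */
    #include <stdio.h>

    #define MT_DMA_HDRS 8

    int main(void)
    {
            unsigned short dma_len[] = { 1024, 256, 64 };   /* hypothetical */
            unsigned data_len = 0, i;

            for (i = 0; i < 3; i++)
                    data_len += MT_DMA_HDRS + dma_len[i];
            printf("urb actual_length = %u\n", data_len);   /* 1368 */
            return 0;
    }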
+
+static void
+mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
+{
+ u32 seg_len, data_len = e->urb->actual_length;
+ u8 *data = page_address(e->p);
+ struct page *new_p = NULL;
+ bool paged = true;
+ int cnt = 0;
+
+ if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
+ return;
+
+ /* Copy if there is very little data in the buffer. */
+ if (data_len < 512) {
+ paged = false;
+ } else {
+ new_p = dev_alloc_pages(MT_RX_ORDER);
+ if (!new_p)
+ paged = false;
+ }
+
+ while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
+ mt7601u_rx_process_seg(dev, data, seg_len, e->p, paged);
+
+ data_len -= seg_len;
+ data += seg_len;
+ cnt++;
+ }
+
+ if (cnt > 1)
+ trace_mt_rx_dma_aggr(dev, cnt, paged);
+
+ if (paged) {
+ /* we have one extra ref from the allocator */
+ __free_pages(e->p, MT_RX_ORDER);
+
+ e->p = new_p;
+ }
+}
+
+static struct mt7601u_dma_buf_rx *
+mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
+{
+ struct mt7601u_rx_queue *q = &dev->rx_q;
+ struct mt7601u_dma_buf_rx *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (!q->pending)
+ goto out;
+
+ buf = &q->e[q->start];
+ q->pending--;
+ q->start = (q->start + 1) % q->entries;
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+
+ return buf;
+}
+
+static void mt7601u_complete_rx(struct urb *urb)
+{
+ struct mt7601u_dev *dev = urb->context;
+ struct mt7601u_rx_queue *q = &dev->rx_q;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ if (mt7601u_urb_has_error(urb))
+ dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+ goto out;
+
+ q->end = (q->end + 1) % q->entries;
+ q->pending++;
+ tasklet_schedule(&dev->rx_tasklet);
+out:
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static void mt7601u_rx_tasklet(unsigned long data)
+{
+ struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+ struct mt7601u_dma_buf_rx *e;
+
+ while ((e = mt7601u_rx_get_pending_entry(dev))) {
+ if (e->urb->status)
+ continue;
+
+ mt7601u_rx_process_entry(dev, e);
+ mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
+ }
+}
+
+static void mt7601u_complete_tx(struct urb *urb)
+{
+ struct mt7601u_tx_queue *q = urb->context;
+ struct mt7601u_dev *dev = q->dev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (mt7601u_urb_has_error(urb))
+ dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
+ if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+ goto out;
+
+ skb = q->e[q->start].skb;
+ trace_mt_tx_dma_done(dev, skb);
+
+ mt7601u_tx_status(dev, skb);
+
+ if (q->used == q->entries - q->entries / 8)
+ ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
+
+ q->start = (q->start + 1) % q->entries;
+ q->used--;
+
+ if (urb->status)
+ goto out;
+
+ set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
+ if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
+ struct sk_buff *skb, u8 ep)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
+ struct mt7601u_dma_buf_tx *e;
+ struct mt7601u_tx_queue *q = &dev->tx_q[ep];
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+
+ if (WARN_ON(q->entries <= q->used)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ e = &q->e[q->end];
+ e->skb = skb;
+ usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+ mt7601u_complete_tx, q);
+ ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+ if (ret) {
+ /* Special-handle ENODEV from TX urb submission because it will
+ * often be the first ENODEV we see after device is removed.
+ */
+ if (ret == -ENODEV)
+ set_bit(MT7601U_STATE_REMOVED, &dev->state);
+ else
+ dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
+ ret);
+ goto out;
+ }
+
+ q->end = (q->end + 1) % q->entries;
+ q->used++;
+
+ if (q->used >= q->entries)
+ ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+out:
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+ return ret;
+}
+
+/* Map hardware Q to USB endpoint number */
+static u8 q2ep(u8 qid)
+{
+ /* TODO: take management packets to queue 5 */
+ return qid + 1;
+}
+
+/* Map USB endpoint number to Q id in the DMA engine */
+static enum mt76_qsel ep2dmaq(u8 ep)
+{
+ if (ep == 5)
+ return MT_QSEL_MGMT;
+ return MT_QSEL_EDCA;
+}
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q)
+{
+ u8 ep = q2ep(hw_q);
+ u32 dma_flags;
+ int ret;
+
+ dma_flags = MT_TXD_PKT_INFO_80211;
+ if (wcid->hw_key_idx == 0xff)
+ dma_flags |= MT_TXD_PKT_INFO_WIV;
+
+ ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
+ if (ret)
+ return ret;
+
+ ret = mt7601u_dma_submit_tx(dev, skb, ep);
+ if (ret) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt7601u_kill_rx(struct mt7601u_dev *dev)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->rx_lock, flags);
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ int next = dev->rx_q.end;
+
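+ /* usb_poison_urb() can sleep while waiting for the urb to
+ * complete, so drop the spinlock around the call */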
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+ usb_poison_urb(dev->rx_q.e[next].urb);
+ spin_lock_irqsave(&dev->rx_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+ struct mt7601u_dma_buf_rx *e, gfp_t gfp)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ u8 *buf = page_address(e->p);
+ unsigned pipe;
+ int ret;
+
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);
+
+ usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
+ mt7601u_complete_rx, dev);
+
+ trace_mt_submit_urb(dev, e->urb);
+ ret = usb_submit_urb(e->urb, gfp);
+ if (ret)
+ dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);
+
+ return ret;
+}
+
+static int mt7601u_submit_rx(struct mt7601u_dev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mt7601u_free_rx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->rx_q.entries; i++) {
+ __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
+ usb_free_urb(dev->rx_q.e[i].urb);
+ }
+}
+
+static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ memset(&dev->rx_q, 0, sizeof(dev->rx_q));
+ dev->rx_q.dev = dev;
+ dev->rx_q.entries = N_RX_ENTRIES;
+
+ for (i = 0; i < N_RX_ENTRIES; i++) {
+ dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
+
+ if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
+{
+ int i;
+
+ WARN_ON(q->used);
+
+ for (i = 0; i < q->entries; i++) {
+ usb_poison_urb(q->e[i].urb);
+ usb_free_urb(q->e[i].urb);
+ }
+}
+
+static void mt7601u_free_tx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ mt7601u_free_tx_queue(&dev->tx_q[i]);
+}
+
+static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
+ struct mt7601u_tx_queue *q)
+{
+ int i;
+
+ q->dev = dev;
+ q->entries = N_TX_ENTRIES;
+
+ for (i = 0; i < N_TX_ENTRIES; i++) {
+ q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!q->e[i].urb)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
+{
+ int i;
+
+ dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
+ sizeof(*dev->tx_q), GFP_KERNEL);
+
+ for (i = 0; i < __MT_EP_OUT_MAX; i++)
+ if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev)
+{
+ int ret = -ENOMEM;
+
+ tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
+
+ ret = mt7601u_alloc_tx(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_alloc_rx(dev);
+ if (ret)
+ goto err;
+
+ ret = mt7601u_submit_rx(dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ mt7601u_dma_cleanup(dev);
+ return ret;
+}
+
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
+{
+ mt7601u_kill_rx(dev);
+
+ tasklet_kill(&dev->rx_tasklet);
+
+ mt7601u_free_rx(dev);
+ mt7601u_free_tx(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.h b/drivers/net/wireless/mediatek/mt7601u/dma.h
new file mode 100644
index 000000000000..978e8a90b87f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_DMA_H
+#define __MT7601U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#include "util.h"
+
+#define MT_DMA_HDR_LEN 4
+#define MT_RX_INFO_LEN 4
+#define MT_FCE_INFO_LEN 4
+#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE GENMASK(31, 30)
+
+enum mt76_msg_port {
+ WLAN_PORT,
+ CPU_RX_PORT,
+ CPU_TX_PORT,
+ HOST_PORT,
+ VIRTUAL_CPU_RX_PORT,
+ VIRTUAL_CPU_TX_PORT,
+ DISCARD,
+};
+
+enum mt76_info_type {
+ DMA_PACKET,
+ DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
+#define MT_TXD_PKT_INFO_80211 BIT(19)
+#define MT_TXD_PKT_INFO_TSO BIT(20)
+#define MT_TXD_PKT_INFO_CSO BIT(21)
+#define MT_TXD_PKT_INFO_WIV BIT(24)
+#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
+
+enum mt76_qsel {
+ MT_QSEL_MGMT,
+ MT_QSEL_HCCA,
+ MT_QSEL_EDCA,
+ MT_QSEL_EDCA_2,
+};
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_INFO_SEQ GENMASK(19, 16)
+#define MT_TXD_CMD_INFO_TYPE GENMASK(26, 20)
+
+static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
+ enum mt76_msg_port d_port,
+ enum mt76_info_type type, u32 flags)
+{
+ u32 info;
+
+ /* Buffer layout:
+ * | 4B | xfer len | pad | 4B |
+ * | TXINFO | pkt/cmd | zero pad to 4B | zero |
+ *
+ * length field of TXINFO should be set to 'xfer len'.
+ */
+
+ info = flags |
+ MT76_SET(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ MT76_SET(MT_TXD_INFO_D_PORT, d_port) |
+ MT76_SET(MT_TXD_INFO_TYPE, type);
+
+ put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+ return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
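To make the layout comment above concrete: wrapping a hypothetical 99-byte packet rounds its length up to 100 for the TXINFO length field, and the final buffer is 4 (TXINFO) + 100 (packet plus pad) + 4 (trailing zeros) = 108 bytes. A stand-alone sketch of the arithmetic, with no real skb involved:

    /* Hedged illustration of the mt7601u_dma_skb_wrap() length math
     * for an invented 99-byte packet. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t round_up4(uint32_t v) { return (v + 3) & ~3u; }

    int main(void)
    {
            uint32_t pkt_len = 99;
            uint32_t xfer = round_up4(pkt_len);  /* 100 - TXINFO length field */
            uint32_t total = 4 + xfer + 4;       /* 108 - info + pkt/pad + zero */

            printf("len field = %u, total buffer = %u\n", xfer, total);
            return 0;
    }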
+
+static inline int
+mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+ flags |= MT76_SET(MT_TXD_PKT_INFO_QSEL, qsel);
+ return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR BIT(24)
+#define MT_RXD_INFO_QSEL GENMASK(26, 25)
+#define MT_RXD_INFO_PORT GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
+
+enum mt76_evt_type {
+ CMD_DONE,
+ CMD_ERROR,
+ CMD_RETRY,
+ EVENT_PWR_RSP,
+ EVENT_WOW_RSP,
+ EVENT_CARRIER_DETECT_RSP,
+ EVENT_DFS_DETECT_RSP,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
new file mode 100644
index 000000000000..ce3837f270f0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt7601u.h"
+#include "eeprom.h"
+
+static bool
+field_valid(u8 val)
+{
+ return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+ if (!field_valid(val))
+ return 0;
+
+ return val;
+}
+
+static int
+mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data,
+ enum mt7601u_eeprom_access_modes mode)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN |
+ MT_EFUSE_CTRL_MODE);
+ val |= MT76_SET(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+ MT76_SET(MT_EFUSE_CTRL_MODE, mode) |
+ MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ val = mt76_rr(dev, MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+ /* Parts of eeprom not in the usage map (0x80-0xc0,0xf0)
+ * will not return valid data but it's ok.
+ */
+ memset(data, 0xff, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, MT_EFUSE_DATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+static int
+mt7601u_efuse_physical_size_check(struct mt7601u_dev *dev)
+{
+ const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16);
+ u8 data[map_reads * 16];
+ int ret, i;
+ u32 start = 0, end = 0, cnt_free;
+
+ for (i = 0; i < map_reads; i++) {
+ ret = mt7601u_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
+ data + i * 16, MT_EE_PHYSICAL_READ);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+ if (!data[i]) {
+ if (!start)
+ start = MT_EE_USAGE_MAP_START + i;
+ end = MT_EE_USAGE_MAP_START + i;
+ }
+ cnt_free = end - start + 1;
+
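+	/* Note: cnt_free is just the span between the first and the last
+	 * zero (free) byte of the 29 byte usage map (0x1e0 - 0x1fc), so
+	 * this is a heuristic: fewer than 5 used bytes means the efuse is
+	 * practically empty and the device apparently expects its EEPROM
+	 * content to come from a default file instead.
+	 */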
+ if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+		dev_err(dev->dev, "Error: your device needs a default EEPROM file and this driver doesn't support it!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool
+mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+	return nic_conf1 != 0xffff && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+static void
+mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
+ u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+ if (!field_valid(nic_conf1 & 0xff))
+ nic_conf1 &= 0xff00;
+
+ dev->ee->tssi_enabled = mt7601u_has_tssi(dev, eeprom) &&
+ !(nic_conf1 & MT_EE_NIC_CONF_1_TEMP_TX_ALC);
+
+ if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+ dev_err(dev->dev,
+ "Error: this driver does not support HW RF ctrl\n");
+
+ if (!field_valid(nic_conf0 >> 8))
+ return;
+
+ if (MT76_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+ MT76_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+ dev_err(dev->dev,
+ "Error: device has more than 1 RX/TX stream!\n");
+}
+
+static int
+mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *eeprom)
+{
+ const void *src = eeprom + MT_EE_MAC_ADDR;
+
+ ether_addr_copy(dev->macaddr, src);
+
+ if (!is_valid_ether_addr(dev->macaddr)) {
+ eth_random_addr(dev->macaddr);
+ dev_info(dev->dev,
+ "Invalid MAC address, using random address %pM\n",
+ dev->macaddr);
+ }
+
+ mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+ mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
+ MT76_SET(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+ return 0;
+}
+
+static void mt7601u_set_channel_target_power(struct mt7601u_dev *dev,
+ u8 *eeprom, u8 max_pwr)
+{
+ u8 trgt_pwr = eeprom[MT_EE_TX_TSSI_TARGET_POWER];
+
+ if (trgt_pwr > max_pwr || !trgt_pwr) {
+ dev_warn(dev->dev, "Error: EEPROM trgt power invalid %hhx!\n",
+ trgt_pwr);
+ trgt_pwr = 0x20;
+ }
+
+ memset(dev->ee->chan_pwr, trgt_pwr, sizeof(dev->ee->chan_pwr));
+}
+
+static void
+mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u32 i, val;
+ u8 max_pwr;
+
+ val = mt7601u_rr(dev, MT_TX_ALC_CFG_0);
+ max_pwr = MT76_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
+
+ if (mt7601u_has_tssi(dev, eeprom)) {
+ mt7601u_set_channel_target_power(dev, eeprom, max_pwr);
+ return;
+ }
+
+ for (i = 0; i < 14; i++) {
+ s8 power = field_validate(eeprom[MT_EE_TX_POWER_OFFSET + i]);
+
+ if (power > max_pwr || power < 0)
+ power = MT7601U_DEFAULT_TX_POWER;
+
+ dev->ee->chan_pwr[i] = power;
+ }
+}
+
+static void
+mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ /* Note: - region 31 is not valid for mt7601u (see rtmp_init.c)
+ * - comments in rtmp_def.h are incorrect (see rt_channel.c)
+ */
+ static const struct reg_channel_bounds chan_bounds[] = {
+ /* EEPROM country regions 0 - 7 */
+ { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 },
+ { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 },
+ /* EEPROM country regions 32 - 33 */
+ { 1, 11 }, { 1, 14 }
+ };
+ u8 val = eeprom[MT_EE_COUNTRY_REGION];
+ int idx = -1;
+
+ if (val < 8)
+ idx = val;
+	if (val > 31 && val < 34)
+ idx = val - 32 + 8;
+
+ if (idx != -1)
+ dev_info(dev->dev,
+ "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+ val, chan_bounds[idx].start,
+ chan_bounds[idx].start + chan_bounds[idx].num - 1);
+ else
+ idx = 5; /* channels 1 - 14 */
+
+ dev->ee->reg = chan_bounds[idx];
+
+ /* TODO: country region 33 is special - phy should be set to B-mode
+ * before entering channel 14 (see sta/connect.c)
+ */
+}
+
+static void
+mt7601u_set_rf_freq_off(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u8 comp;
+
+ dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+ comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
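+	/* The compensation byte is sign-magnitude, e.g. 0x85 shifts
+	 * rf_freq_off down by 5 and 0x05 shifts it up by 5; an unprogrammed
+	 * 0xff is normalized to 0 by field_validate() and has no effect.
+	 */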
+ if (comp & BIT(7))
+ dev->ee->rf_freq_off -= comp & 0x7f;
+ else
+ dev->ee->rf_freq_off += comp;
+}
+
+static void
+mt7601u_set_rssi_offset(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ int i;
+ s8 *rssi_offset = dev->ee->rssi_offset;
+
+ for (i = 0; i < 2; i++) {
+ rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+ if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+ dev_warn(dev->dev,
+ "Warning: EEPROM RSSI is invalid %02hhx\n",
+ rssi_offset[i]);
+ rssi_offset[i] = 0;
+ }
+ }
+}
+
+static void
+mt7601u_extra_power_over_mac(struct mt7601u_dev *dev)
+{
+ u32 val;
+
+ val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_1) & 0x0000ff00) >> 8);
+ val |= ((mt7601u_rr(dev, MT_TX_PWR_CFG_2) & 0x0000ff00) << 8);
+ mt7601u_wr(dev, MT_TX_PWR_CFG_7, val);
+
+ val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+ mt7601u_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt7601u_set_power_rate(struct power_per_rate *rate, s8 delta, u8 value)
+{
+ rate->raw = s6_validate(value);
+ rate->bw20 = s6_to_int(value);
+ /* Note: vendor driver does cap the value to s6 right away */
+ rate->bw40 = rate->bw20 + delta;
+}
+
+static void
+mt7601u_save_power_rate(struct mt7601u_dev *dev, s8 delta, u32 val, int i)
+{
+ struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+ switch (i) {
+ case 0:
+ mt7601u_set_power_rate(&t->cck[0], delta, (val >> 0) & 0xff);
+ mt7601u_set_power_rate(&t->cck[1], delta, (val >> 8) & 0xff);
+ /* Save cck bw20 for fixups of channel 14 */
+ dev->ee->real_cck_bw20[0] = t->cck[0].bw20;
+ dev->ee->real_cck_bw20[1] = t->cck[1].bw20;
+
+ mt7601u_set_power_rate(&t->ofdm[0], delta, (val >> 16) & 0xff);
+ mt7601u_set_power_rate(&t->ofdm[1], delta, (val >> 24) & 0xff);
+ break;
+ case 1:
+ mt7601u_set_power_rate(&t->ofdm[2], delta, (val >> 0) & 0xff);
+ mt7601u_set_power_rate(&t->ofdm[3], delta, (val >> 8) & 0xff);
+ mt7601u_set_power_rate(&t->ht[0], delta, (val >> 16) & 0xff);
+ mt7601u_set_power_rate(&t->ht[1], delta, (val >> 24) & 0xff);
+ break;
+ case 2:
+ mt7601u_set_power_rate(&t->ht[2], delta, (val >> 0) & 0xff);
+ mt7601u_set_power_rate(&t->ht[3], delta, (val >> 8) & 0xff);
+ break;
+ }
+}
+
+static s8
+get_delta(u8 val)
+{
+ s8 ret;
+
+ if (!field_valid(val) || !(val & BIT(7)))
+ return 0;
+
+ ret = val & 0x1f;
+ if (ret > 8)
+ ret = 8;
+ if (val & BIT(6))
+ ret = -ret;
+
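+	/* Examples: 0x84 -> +4, 0xc4 -> -4, 0x9f -> +8 (magnitude capped
+	 * at 8), 0x04 -> 0 (BIT(7) enable flag clear), 0xff -> 0 (invalid).
+	 */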
+ return ret;
+}
+
+static void
+mt7601u_config_tx_power_per_rate(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ u32 val;
+ s8 bw40_delta;
+ int i;
+
+ bw40_delta = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+
+ for (i = 0; i < 5; i++) {
+ val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+
+ mt7601u_save_power_rate(dev, bw40_delta, val, i);
+
+ if (~val)
+ mt7601u_wr(dev, MT_TX_PWR_CFG_0 + i * 4, val);
+ }
+
+ mt7601u_extra_power_over_mac(dev);
+}
+
+static void
+mt7601u_init_tssi_params(struct mt7601u_dev *dev, u8 *eeprom)
+{
+ struct tssi_data *d = &dev->ee->tssi_data;
+
+ if (!dev->ee->tssi_enabled)
+ return;
+
+ d->slope = eeprom[MT_EE_TX_TSSI_SLOPE];
+ d->tx0_delta_offset = eeprom[MT_EE_TX_TSSI_OFFSET] * 1024;
+ d->offset[0] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP];
+ d->offset[1] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 1];
+ d->offset[2] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 2];
+}
+
+int
+mt7601u_eeprom_init(struct mt7601u_dev *dev)
+{
+ u8 *eeprom;
+ int i, ret;
+
+ ret = mt7601u_efuse_physical_size_check(dev);
+ if (ret)
+ return ret;
+
+ dev->ee = devm_kzalloc(dev->dev, sizeof(*dev->ee), GFP_KERNEL);
+ if (!dev->ee)
+ return -ENOMEM;
+
+ eeprom = kmalloc(MT7601U_EEPROM_SIZE, GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ for (i = 0; i + 16 <= MT7601U_EEPROM_SIZE; i += 16) {
+ ret = mt7601u_efuse_read(dev, i, eeprom + i, MT_EE_READ);
+ if (ret)
+ goto out;
+ }
+
+ if (eeprom[MT_EE_VERSION_EE] > MT7601U_EE_MAX_VER)
+ dev_warn(dev->dev,
+ "Warning: unsupported EEPROM version %02hhx\n",
+ eeprom[MT_EE_VERSION_EE]);
+ dev_info(dev->dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+ eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
+
+ mt7601u_set_macaddr(dev, eeprom);
+ mt7601u_set_chip_cap(dev, eeprom);
+ mt7601u_set_channel_power(dev, eeprom);
+ mt7601u_set_country_reg(dev, eeprom);
+ mt7601u_set_rf_freq_off(dev, eeprom);
+ mt7601u_set_rssi_offset(dev, eeprom);
+ dev->ee->ref_temp = eeprom[MT_EE_REF_TEMP];
+ dev->ee->lna_gain = eeprom[MT_EE_LNA_GAIN];
+
+ mt7601u_config_tx_power_per_rate(dev, eeprom);
+
+ mt7601u_init_tssi_params(dev, eeprom);
+out:
+ kfree(eeprom);
+ return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
new file mode 100644
index 000000000000..662d12703b69
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_EEPROM_H
+#define __MT7601U_EEPROM_H
+
+struct mt7601u_dev;
+
+#define MT7601U_EE_MAX_VER 0x0c
+#define MT7601U_EEPROM_SIZE 256
+
+#define MT7601U_DEFAULT_TX_POWER 6
+
+enum mt76_eeprom_field {
+ MT_EE_CHIP_ID = 0x00,
+ MT_EE_VERSION_FAE = 0x02,
+ MT_EE_VERSION_EE = 0x03,
+ MT_EE_MAC_ADDR = 0x04,
+ MT_EE_NIC_CONF_0 = 0x34,
+ MT_EE_NIC_CONF_1 = 0x36,
+ MT_EE_COUNTRY_REGION = 0x39,
+ MT_EE_FREQ_OFFSET = 0x3a,
+ MT_EE_NIC_CONF_2 = 0x42,
+
+ MT_EE_LNA_GAIN = 0x44,
+ MT_EE_RSSI_OFFSET = 0x46,
+
+ MT_EE_TX_POWER_DELTA_BW40 = 0x50,
+ MT_EE_TX_POWER_OFFSET = 0x52,
+
+ MT_EE_TX_TSSI_SLOPE = 0x6e,
+ MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f,
+ MT_EE_TX_TSSI_OFFSET = 0x76,
+
+ MT_EE_TX_TSSI_TARGET_POWER = 0xd0,
+ MT_EE_REF_TEMP = 0xd1,
+ MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb,
+ MT_EE_TX_POWER_BYRATE_BASE = 0xde,
+
+ MT_EE_USAGE_MAP_START = 0x1e0,
+ MT_EE_USAGE_MAP_END = 0x1fc,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
+
+#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \
+ (i) * 4)
+
+#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
+ MT_EE_USAGE_MAP_START + 1)
+
+enum mt7601u_eeprom_access_modes {
+ MT_EE_READ = 0,
+ MT_EE_PHYSICAL_READ = 1,
+};
+
+struct power_per_rate {
+ u8 raw; /* validated s6 value */
+ s8 bw20; /* sign-extended int */
+ s8 bw40; /* sign-extended int */
+};
+
+/* Power per rate - one value per two rates */
+struct mt7601u_rate_power {
+ struct power_per_rate cck[2];
+ struct power_per_rate ofdm[4];
+ struct power_per_rate ht[4];
+};
+
+struct reg_channel_bounds {
+ u8 start;
+ u8 num;
+};
+
+struct mt7601u_eeprom_params {
+ bool tssi_enabled;
+ u8 rf_freq_off;
+ s8 rssi_offset[2];
+ s8 ref_temp;
+ s8 lna_gain;
+
+ u8 chan_pwr[14];
+ struct mt7601u_rate_power power_rate_table;
+ s8 real_cck_bw20[2];
+
+ /* TSSI stuff - only with internal TX ALC */
+ struct tssi_data {
+ int tx0_delta_offset;
+ u8 slope;
+ u8 offset[3];
+ } tssi_data;
+
+ struct reg_channel_bounds reg;
+};
+
+int mt7601u_eeprom_init(struct mt7601u_dev *dev);
+
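+/* s6 values are 6-bit two's complement, e.g.:
+ *   s6_to_int(0x3f) == -1, s6_to_int(0x20) == -32, s6_to_int(0x1f) == 31,
+ * and int_to_s6() clamps out of range input: int_to_s6(-40) == 0x20,
+ * int_to_s6(40) == 0x1f, int_to_s6(-1) == 0x3f.
+ */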
+static inline u32 s6_validate(u32 reg)
+{
+ WARN_ON(reg & ~GENMASK(5, 0));
+ return reg & GENMASK(5, 0);
+}
+
+static inline int s6_to_int(u32 reg)
+{
+ int s6;
+
+ s6 = s6_validate(reg);
+ if (s6 & BIT(5))
+ s6 -= BIT(6);
+
+ return s6;
+}
+
+static inline u32 int_to_s6(int val)
+{
+ if (val < -0x20)
+ return 0x20;
+ if (val > 0x1f)
+ return 0x1f;
+
+ return val & 0x3f;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
new file mode 100644
index 000000000000..ca1ea2e55ff4
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -0,0 +1,625 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "mcu.h"
+
+#include "initvals.h"
+
+static void
+mt7601u_set_wlan_state(struct mt7601u_dev *dev, u32 val, bool enable)
+{
+ int i;
+
+ /* Note: we don't turn off WLAN_CLK because that makes the device
+ * not respond properly on the probe path.
+ * In case anyone (PSM?) wants to use this function we can
+ * bring the clock stuff back and fixup the probe path.
+ */
+
+ if (enable)
+ val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+ MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+ else
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+ mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ if (enable) {
+ set_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+ } else {
+ clear_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+ return;
+ }
+
+ for (i = 200; i; i--) {
+ val = mt7601u_rr(dev, MT_CMB_CTRL);
+
+ if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
+ break;
+
+ udelay(20);
+ }
+
+ /* Note: vendor driver tries to disable/enable wlan here and retry
+ * but the code which does it is so buggy it must have never
+ * triggered, so don't bother.
+ */
+ if (!i)
+ dev_err(dev->dev, "Error: PLL and XTAL check failed!\n");
+}
+
+static void mt7601u_chip_onoff(struct mt7601u_dev *dev, bool enable, bool reset)
+{
+ u32 val;
+
+ mutex_lock(&dev->hw_atomic_mutex);
+
+ val = mt7601u_rr(dev, MT_WLAN_FUN_CTRL);
+
+ if (reset) {
+ val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+ val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+ if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+ val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+ MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+ }
+ }
+
+ mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+ udelay(20);
+
+ mt7601u_set_wlan_state(dev, val, enable);
+
+ mutex_unlock(&dev->hw_atomic_mutex);
+}
+
+static void mt7601u_reset_csr_bbp(struct mt7601u_dev *dev)
+{
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, (MT_MAC_SYS_CTRL_RESET_CSR |
+ MT_MAC_SYS_CTRL_RESET_BBP));
+ mt7601u_wr(dev, MT_USB_DMA_CFG, 0);
+ msleep(1);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+}
+
+static void mt7601u_init_usb_dma(struct mt7601u_dev *dev)
+{
+ u32 val;
+
+ val = MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+ MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+ MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN;
+ if (dev->in_max_packet == 512)
+ val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
+ val |= MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+ val &= ~MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+static int mt7601u_init_bbp(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ ret = mt7601u_wait_bbp_ready(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_common_vals,
+ ARRAY_SIZE(bbp_common_vals));
+ if (ret)
+ return ret;
+
+ return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_chip_vals,
+ ARRAY_SIZE(bbp_chip_vals));
+}
+
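+/* Each MT_BCN_OFFSET register packs four beacon slot offsets, one byte
+ * each, in units of 64 bytes above MT_BEACON_BASE.  With the 512 byte
+ * slots used by this driver regs[0] comes out as 0x18100800, i.e. slots
+ * at 0xc000, 0xc200, 0xc400 and 0xc600 (cf. mac_chip_vals).
+ */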
+static void
+mt76_init_beacon_offsets(struct mt7601u_dev *dev)
+{
+ u16 base = MT_BEACON_BASE;
+ u32 regs[4] = {};
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ u16 addr = dev->beacon_offsets[i];
+
+ regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt7601u_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static int mt7601u_write_mac_initvals(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_common_vals,
+ ARRAY_SIZE(mac_common_vals));
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN,
+ mac_chip_vals, ARRAY_SIZE(mac_chip_vals));
+ if (ret)
+ return ret;
+
+ mt76_init_beacon_offsets(dev);
+
+ mt7601u_wr(dev, MT_AUX_CLK_CFG, 0);
+
+ return 0;
+}
+
+static int mt7601u_init_wcid_mem(struct mt7601u_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ for (i = 0; i < N_WCIDS; i++) {
+ vals[i * 2] = 0xffffffff;
+ vals[i * 2 + 1] = 0x00ffffff;
+ }
+
+ ret = mt7601u_burst_write_regs(dev, MT_WCID_ADDR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static int mt7601u_init_key_mem(struct mt7601u_dev *dev)
+{
+ u32 vals[4] = {};
+
+ return mt7601u_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
+ vals, ARRAY_SIZE(vals));
+}
+
+static int mt7601u_init_wcid_attr_mem(struct mt7601u_dev *dev)
+{
+ u32 *vals;
+ int i, ret;
+
+ vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ for (i = 0; i < N_WCIDS * 2; i++)
+ vals[i] = 1;
+
+ ret = mt7601u_burst_write_regs(dev, MT_WCID_ATTR_BASE,
+ vals, N_WCIDS * 2);
+ kfree(vals);
+
+ return ret;
+}
+
+static void mt7601u_reset_counters(struct mt7601u_dev *dev)
+{
+ mt7601u_rr(dev, MT_RX_STA_CNT0);
+ mt7601u_rr(dev, MT_RX_STA_CNT1);
+ mt7601u_rr(dev, MT_RX_STA_CNT2);
+ mt7601u_rr(dev, MT_TX_STA_CNT0);
+ mt7601u_rr(dev, MT_TX_STA_CNT1);
+ mt7601u_rr(dev, MT_TX_STA_CNT2);
+}
+
+int mt7601u_mac_start(struct mt7601u_dev *dev)
+{
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+ return -ETIMEDOUT;
+
+ dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
+ MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
+ MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
+ MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
+ MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
+ mt7601u_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev)
+{
+ int i, ok;
+
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+ dev_warn(dev->dev, "Warning: TX DMA did not stop!\n");
+
+ /* Page count on TxQ */
+ i = 200;
+ while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+ (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+ (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+ msleep(10);
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+ dev_warn(dev->dev, "Warning: MAC TX did not stop!\n");
+
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+ MT_MAC_SYS_CTRL_ENABLE_TX);
+
+ /* Page count on RxQ */
+ ok = 0;
+ i = 200;
+ while (i--) {
+ if ((mt76_rr(dev, 0x0430) & 0x00ff0000) ||
+ (mt76_rr(dev, 0x0a30) & 0xffffffff) ||
+ (mt76_rr(dev, 0x0a34) & 0xffffffff))
+ ok++;
+ if (ok > 6)
+ break;
+
+ msleep(1);
+ }
+
+ if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+ dev_warn(dev->dev, "Warning: MAC RX did not stop!\n");
+
+ if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+ dev_warn(dev->dev, "Warning: RX DMA did not stop!\n");
+}
+
+void mt7601u_mac_stop(struct mt7601u_dev *dev)
+{
+ mt7601u_mac_stop_hw(dev);
+ flush_delayed_work(&dev->stat_work);
+ cancel_delayed_work_sync(&dev->stat_work);
+}
+
+static void mt7601u_stop_hardware(struct mt7601u_dev *dev)
+{
+ mt7601u_chip_onoff(dev, false, false);
+}
+
+int mt7601u_init_hardware(struct mt7601u_dev *dev)
+{
+ static const u16 beacon_offsets[16] = {
+		/* 512 bytes per beacon */
+ 0xc000, 0xc200, 0xc400, 0xc600,
+ 0xc800, 0xca00, 0xcc00, 0xce00,
+ 0xd000, 0xd200, 0xd400, 0xd600,
+ 0xd800, 0xda00, 0xdc00, 0xde00
+ };
+ int ret;
+
+ dev->beacon_offsets = beacon_offsets;
+
+ mt7601u_chip_onoff(dev, true, false);
+
+ ret = mt7601u_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_mcu_init(dev);
+ if (ret)
+ goto err;
+
+ if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ /* Wait for ASIC ready after FW load. */
+ ret = mt7601u_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ mt7601u_reset_csr_bbp(dev);
+ mt7601u_init_usb_dma(dev);
+
+ ret = mt7601u_mcu_cmd_init(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_dma_init(dev);
+ if (ret)
+ goto err_mcu;
+ ret = mt7601u_write_mac_initvals(dev);
+ if (ret)
+ goto err_rx;
+
+ if (!mt76_poll_msec(dev, MT_MAC_STATUS,
+ MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100)) {
+ ret = -EIO;
+ goto err_rx;
+ }
+
+ ret = mt7601u_init_bbp(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt7601u_init_wcid_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt7601u_init_key_mem(dev);
+ if (ret)
+ goto err_rx;
+ ret = mt7601u_init_wcid_attr_mem(dev);
+ if (ret)
+ goto err_rx;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX));
+
+ mt7601u_reset_counters(dev);
+
+ mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+
+ mt7601u_wr(dev, MT_TXOP_CTRL_CFG, MT76_SET(MT_TXOP_TRUN_EN, 0x3f) |
+ MT76_SET(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+ ret = mt7601u_eeprom_init(dev);
+ if (ret)
+ goto err_rx;
+
+ ret = mt7601u_phy_init(dev);
+ if (ret)
+ goto err_rx;
+
+ mt7601u_set_rx_path(dev, 0);
+ mt7601u_set_tx_dac(dev, 0);
+
+ mt7601u_mac_set_ctrlch(dev, false);
+ mt7601u_bbp_set_ctrlch(dev, false);
+ mt7601u_bbp_set_bw(dev, MT_BW_20);
+
+ return 0;
+
+err_rx:
+ mt7601u_dma_cleanup(dev);
+err_mcu:
+ mt7601u_mcu_cmd_deinit(dev);
+err:
+ mt7601u_chip_onoff(dev, false, false);
+ return ret;
+}
+
+void mt7601u_cleanup(struct mt7601u_dev *dev)
+{
+ mt7601u_stop_hardware(dev);
+ mt7601u_dma_cleanup(dev);
+ mt7601u_mcu_cmd_deinit(dev);
+}
+
+struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
+{
+ struct ieee80211_hw *hw;
+ struct mt7601u_dev *dev;
+
+ hw = ieee80211_alloc_hw(sizeof(*dev), &mt7601u_ops);
+ if (!hw)
+ return NULL;
+
+ dev = hw->priv;
+ dev->dev = pdev;
+ dev->hw = hw;
+ mutex_init(&dev->vendor_req_mutex);
+ mutex_init(&dev->reg_atomic_mutex);
+ mutex_init(&dev->hw_atomic_mutex);
+ mutex_init(&dev->mutex);
+ spin_lock_init(&dev->tx_lock);
+ spin_lock_init(&dev->rx_lock);
+ spin_lock_init(&dev->lock);
+ spin_lock_init(&dev->con_mon_lock);
+ atomic_set(&dev->avg_ampdu_len, 1);
+
+ dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
+ if (!dev->stat_wq) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
+ return dev;
+}
+
+#define CHAN2G(_idx, _freq) { \
+ .band = IEEE80211_BAND_2GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+ CHAN2G(1, 2412),
+ CHAN2G(2, 2417),
+ CHAN2G(3, 2422),
+ CHAN2G(4, 2427),
+ CHAN2G(5, 2432),
+ CHAN2G(6, 2437),
+ CHAN2G(7, 2442),
+ CHAN2G(8, 2447),
+ CHAN2G(9, 2452),
+ CHAN2G(10, 2457),
+ CHAN2G(11, 2462),
+ CHAN2G(12, 2467),
+ CHAN2G(13, 2472),
+ CHAN2G(14, 2484),
+};
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \
+}
+
+static struct ieee80211_rate mt76_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(0, 60),
+ OFDM_RATE(1, 90),
+ OFDM_RATE(2, 120),
+ OFDM_RATE(3, 180),
+ OFDM_RATE(4, 240),
+ OFDM_RATE(5, 360),
+ OFDM_RATE(6, 480),
+ OFDM_RATE(7, 540),
+};
+
+static int
+mt76_init_sband(struct mt7601u_dev *dev, struct ieee80211_supported_band *sband,
+ const struct ieee80211_channel *chan, int n_chan,
+ struct ieee80211_rate *rates, int n_rates)
+{
+ struct ieee80211_sta_ht_cap *ht_cap;
+ void *chanlist;
+ int size;
+
+ size = n_chan * sizeof(*chan);
+ chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+ if (!chanlist)
+ return -ENOMEM;
+
+ sband->channels = chanlist;
+ sband->n_channels = n_chan;
+ sband->bitrates = rates;
+ sband->n_bitrates = n_rates;
+
+ ht_cap = &sband->ht_cap;
+ ht_cap->ht_supported = true;
+ ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ ht_cap->mcs.rx_mask[0] = 0xff;
+ ht_cap->mcs.rx_mask[4] = 0x1;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
+
+ dev->chandef.chan = &sband->channels[0];
+
+ return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt7601u_dev *dev)
+{
+	dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
+				     GFP_KERNEL);
+	if (!dev->sband_2g)
+		return -ENOMEM;
+
+	dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g;
+
+ WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
+ ARRAY_SIZE(mt76_channels_2ghz));
+
+ return mt76_init_sband(dev, dev->sband_2g,
+ &mt76_channels_2ghz[dev->ee->reg.start - 1],
+ dev->ee->reg.num,
+ mt76_rates, ARRAY_SIZE(mt76_rates));
+}
+
+int mt7601u_register_device(struct mt7601u_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+	/* Reserve WCID 0 for mcast - thanks to this the AP's WCID will go to
+	 * entry no. 1, as it does in the vendor driver.
+	 */
+ dev->wcid_mask[0] |= 1;
+
+ /* init fake wcid for monitor interfaces */
+ dev->mon_wcid = devm_kmalloc(dev->dev, sizeof(*dev->mon_wcid),
+ GFP_KERNEL);
+ if (!dev->mon_wcid)
+ return -ENOMEM;
+ dev->mon_wcid->idx = 0xff;
+ dev->mon_wcid->hw_key_idx = -1;
+
+ SET_IEEE80211_DEV(hw, dev->dev);
+
+ hw->queues = 4;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+
+ hw->sta_data_size = sizeof(struct mt76_sta);
+ hw->vif_data_size = sizeof(struct mt76_vif);
+
+ SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+ wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ ret = mt76_init_sband_2g(dev);
+ if (ret)
+ return ret;
+
+ INIT_DELAYED_WORK(&dev->mac_work, mt7601u_mac_work);
+ INIT_DELAYED_WORK(&dev->stat_work, mt7601u_tx_stat);
+
+ ret = ieee80211_register_hw(hw);
+ if (ret)
+ return ret;
+
+ mt7601u_init_debugfs(dev);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals.h b/drivers/net/wireless/mediatek/mt7601u/initvals.h
new file mode 100644
index 000000000000..ec11ff66969d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/initvals.h
@@ -0,0 +1,164 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_INITVALS_H
+#define __MT7601U_INITVALS_H
+
+static const struct mt76_reg_pair bbp_common_vals[] = {
+ { 65, 0x2c },
+ { 66, 0x38 },
+ { 68, 0x0b },
+ { 69, 0x12 },
+ { 70, 0x0a },
+ { 73, 0x10 },
+ { 81, 0x37 },
+ { 82, 0x62 },
+ { 83, 0x6a },
+ { 84, 0x99 },
+ { 86, 0x00 },
+ { 91, 0x04 },
+ { 92, 0x00 },
+ { 103, 0x00 },
+ { 105, 0x05 },
+ { 106, 0x35 },
+};
+
+static const struct mt76_reg_pair bbp_chip_vals[] = {
+ { 1, 0x04 }, { 4, 0x40 }, { 20, 0x06 }, { 31, 0x08 },
+ /* CCK Tx Control */
+ { 178, 0xff },
+ /* AGC/Sync controls */
+ { 66, 0x14 }, { 68, 0x8b }, { 69, 0x12 }, { 70, 0x09 },
+ { 73, 0x11 }, { 75, 0x60 }, { 76, 0x44 }, { 84, 0x9a },
+ { 86, 0x38 }, { 91, 0x07 }, { 92, 0x02 },
+ /* Rx Path Controls */
+ { 99, 0x50 }, { 101, 0x00 }, { 103, 0xc0 }, { 104, 0x92 },
+ { 105, 0x3c }, { 106, 0x03 }, { 128, 0x12 },
+ /* Change RXWI content: Gain Report */
+ { 142, 0x04 }, { 143, 0x37 },
+ /* Change RXWI content: Antenna Report */
+ { 142, 0x03 }, { 143, 0x99 },
+ /* Calibration Index Register */
+ /* CCK Receiver Control */
+ { 160, 0xeb }, { 161, 0xc4 }, { 162, 0x77 }, { 163, 0xf9 },
+ { 164, 0x88 }, { 165, 0x80 }, { 166, 0xff }, { 167, 0xe4 },
+ /* Added AGC controls - these AGC/GLRT registers are accessed
+ * through R195 and R196.
+ */
+ { 195, 0x00 }, { 196, 0x00 },
+ { 195, 0x01 }, { 196, 0x04 },
+ { 195, 0x02 }, { 196, 0x20 },
+ { 195, 0x03 }, { 196, 0x0a },
+ { 195, 0x06 }, { 196, 0x16 },
+ { 195, 0x07 }, { 196, 0x05 },
+ { 195, 0x08 }, { 196, 0x37 },
+ { 195, 0x0a }, { 196, 0x15 },
+ { 195, 0x0b }, { 196, 0x17 },
+ { 195, 0x0c }, { 196, 0x06 },
+ { 195, 0x0d }, { 196, 0x09 },
+ { 195, 0x0e }, { 196, 0x05 },
+ { 195, 0x0f }, { 196, 0x09 },
+ { 195, 0x10 }, { 196, 0x20 },
+ { 195, 0x20 }, { 196, 0x17 },
+ { 195, 0x21 }, { 196, 0x06 },
+ { 195, 0x22 }, { 196, 0x09 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x25 }, { 196, 0x09 },
+ { 195, 0x26 }, { 196, 0x17 },
+ { 195, 0x27 }, { 196, 0x06 },
+ { 195, 0x28 }, { 196, 0x09 },
+ { 195, 0x29 }, { 196, 0x05 },
+ { 195, 0x2a }, { 196, 0x09 },
+ { 195, 0x80 }, { 196, 0x8b },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x82 }, { 196, 0x09 },
+ { 195, 0x83 }, { 196, 0x17 },
+ { 195, 0x84 }, { 196, 0x11 },
+ { 195, 0x85 }, { 196, 0x00 },
+ { 195, 0x86 }, { 196, 0x00 },
+ { 195, 0x87 }, { 196, 0x18 },
+ { 195, 0x88 }, { 196, 0x60 },
+ { 195, 0x89 }, { 196, 0x44 },
+ { 195, 0x8a }, { 196, 0x8b },
+ { 195, 0x8b }, { 196, 0x8b },
+ { 195, 0x8c }, { 196, 0x8b },
+ { 195, 0x8d }, { 196, 0x8b },
+ { 195, 0x8e }, { 196, 0x09 },
+ { 195, 0x8f }, { 196, 0x09 },
+ { 195, 0x90 }, { 196, 0x09 },
+ { 195, 0x91 }, { 196, 0x09 },
+ { 195, 0x92 }, { 196, 0x11 },
+ { 195, 0x93 }, { 196, 0x11 },
+ { 195, 0x94 }, { 196, 0x11 },
+ { 195, 0x95 }, { 196, 0x11 },
+ /* PPAD */
+ { 47, 0x80 }, { 60, 0x80 }, { 150, 0xd2 }, { 151, 0x32 },
+ { 152, 0x23 }, { 153, 0x41 }, { 154, 0x00 }, { 155, 0x4f },
+ { 253, 0x7e }, { 195, 0x30 }, { 196, 0x32 }, { 195, 0x31 },
+ { 196, 0x23 }, { 195, 0x32 }, { 196, 0x45 }, { 195, 0x35 },
+ { 196, 0x4a }, { 195, 0x36 }, { 196, 0x5a }, { 195, 0x37 },
+ { 196, 0x5a },
+};
+
+static const struct mt76_reg_pair mac_common_vals[] = {
+ { MT_LEGACY_BASIC_RATE, 0x0000013f },
+ { MT_HT_BASIC_RATE, 0x00008003 },
+ { MT_MAC_SYS_CTRL, 0x00000000 },
+ { MT_RX_FILTR_CFG, 0x00017f97 },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TX_SW_CFG0, 0x00000000 },
+ { MT_TX_SW_CFG1, 0x00080606 },
+ { MT_TX_LINK_CFG, 0x00001020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2090 },
+ { MT_MAX_LEN_CFG, 0x00003fff },
+ { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f },
+ { MT_PBF_RX_MAX_PCNT, 0x0000009f },
+ { MT_TX_RETRY_CFG, 0x47d01f0f },
+ { MT_AUTO_RSP_CFG, 0x00000013 },
+ { MT_CCK_PROT_CFG, 0x05740003 },
+ { MT_OFDM_PROT_CFG, 0x05740003 },
+ { MT_MM40_PROT_CFG, 0x03f44084 },
+ { MT_GF20_PROT_CFG, 0x01744004 },
+ { MT_GF40_PROT_CFG, 0x03f44084 },
+ { MT_MM20_PROT_CFG, 0x01744004 },
+ { MT_TXOP_CTRL_CFG, 0x0000583f },
+ { MT_TX_RTS_CFG, 0x01092b20 },
+ { MT_EXP_ACK_TIME, 0x002400ca },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { MT_XIFS_TIME_CFG, 0x33a41010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+};
+
+static const struct mt76_reg_pair mac_chip_vals[] = {
+ { MT_TSO_CTRL, 0x00006050 },
+ { MT_BCN_OFFSET(0), 0x18100800 },
+ { MT_BCN_OFFSET(1), 0x38302820 },
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x7f723c1f },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_PAUSE_ENABLE_CONTROL1, 0x00000000 },
+ { MT_TX0_RF_GAIN_CORR, 0x003b0005 },
+ { MT_TX0_RF_GAIN_ATTEN, 0x00006900 },
+ { MT_TX0_BB_GAIN_ATTEN, 0x00000400 },
+ { MT_TX_ALC_VGA3, 0x00060006 },
+ { MT_TX_SW_CFG0, 0x00000402 },
+ { MT_TX_SW_CFG1, 0x00000000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_FCE_CSO, 0x0000030f },
+ { MT_FCE_PARAMETERS, 0x00256f0f },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h
new file mode 100644
index 000000000000..a2bdc3e322bf
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h
@@ -0,0 +1,291 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_PHY_INITVALS_H
+#define __MT7601U_PHY_INITVALS_H
+
+#define RF_REG_PAIR(bank, reg, value) \
+ { MT_MCU_MEMMAP_RF | (bank) << 16 | (reg), value }
+
+static const struct mt76_reg_pair rf_central[] = {
+ /* Bank 0 - for central blocks: BG, PLL, XTAL, LO, ADC/DAC */
+ RF_REG_PAIR(0, 0, 0x02),
+ RF_REG_PAIR(0, 1, 0x01),
+ RF_REG_PAIR(0, 2, 0x11),
+ RF_REG_PAIR(0, 3, 0xff),
+ RF_REG_PAIR(0, 4, 0x0a),
+ RF_REG_PAIR(0, 5, 0x20),
+ RF_REG_PAIR(0, 6, 0x00),
+ /* B/G */
+ RF_REG_PAIR(0, 7, 0x00),
+ RF_REG_PAIR(0, 8, 0x00),
+ RF_REG_PAIR(0, 9, 0x00),
+ RF_REG_PAIR(0, 10, 0x00),
+ RF_REG_PAIR(0, 11, 0x21),
+ /* XO */
+ RF_REG_PAIR(0, 13, 0x00), /* 40mhz xtal */
+ /* RF_REG_PAIR(0, 13, 0x13), */ /* 20mhz xtal */
+ RF_REG_PAIR(0, 14, 0x7c),
+ RF_REG_PAIR(0, 15, 0x22),
+ RF_REG_PAIR(0, 16, 0x80),
+ /* PLL */
+ RF_REG_PAIR(0, 17, 0x99),
+ RF_REG_PAIR(0, 18, 0x99),
+ RF_REG_PAIR(0, 19, 0x09),
+ RF_REG_PAIR(0, 20, 0x50),
+ RF_REG_PAIR(0, 21, 0xb0),
+ RF_REG_PAIR(0, 22, 0x00),
+ RF_REG_PAIR(0, 23, 0xc5),
+ RF_REG_PAIR(0, 24, 0xfc),
+ RF_REG_PAIR(0, 25, 0x40),
+ RF_REG_PAIR(0, 26, 0x4d),
+ RF_REG_PAIR(0, 27, 0x02),
+ RF_REG_PAIR(0, 28, 0x72),
+ RF_REG_PAIR(0, 29, 0x01),
+ RF_REG_PAIR(0, 30, 0x00),
+ RF_REG_PAIR(0, 31, 0x00),
+ /* test ports */
+ RF_REG_PAIR(0, 32, 0x00),
+ RF_REG_PAIR(0, 33, 0x00),
+ RF_REG_PAIR(0, 34, 0x23),
+ RF_REG_PAIR(0, 35, 0x01), /* change setting to reduce spurs */
+ RF_REG_PAIR(0, 36, 0x00),
+ RF_REG_PAIR(0, 37, 0x00),
+ /* ADC/DAC */
+ RF_REG_PAIR(0, 38, 0x00),
+ RF_REG_PAIR(0, 39, 0x20),
+ RF_REG_PAIR(0, 40, 0x00),
+ RF_REG_PAIR(0, 41, 0xd0),
+ RF_REG_PAIR(0, 42, 0x1b),
+ RF_REG_PAIR(0, 43, 0x02),
+ RF_REG_PAIR(0, 44, 0x00),
+};
+
+static const struct mt76_reg_pair rf_channel[] = {
+ RF_REG_PAIR(4, 0, 0x01),
+ RF_REG_PAIR(4, 1, 0x00),
+ RF_REG_PAIR(4, 2, 0x00),
+ RF_REG_PAIR(4, 3, 0x00),
+ /* LDO */
+ RF_REG_PAIR(4, 4, 0x00),
+ RF_REG_PAIR(4, 5, 0x08),
+ RF_REG_PAIR(4, 6, 0x00),
+ /* RX */
+ RF_REG_PAIR(4, 7, 0x5b),
+ RF_REG_PAIR(4, 8, 0x52),
+ RF_REG_PAIR(4, 9, 0xb6),
+ RF_REG_PAIR(4, 10, 0x57),
+ RF_REG_PAIR(4, 11, 0x33),
+ RF_REG_PAIR(4, 12, 0x22),
+ RF_REG_PAIR(4, 13, 0x3d),
+ RF_REG_PAIR(4, 14, 0x3e),
+ RF_REG_PAIR(4, 15, 0x13),
+ RF_REG_PAIR(4, 16, 0x22),
+ RF_REG_PAIR(4, 17, 0x23),
+ RF_REG_PAIR(4, 18, 0x02),
+ RF_REG_PAIR(4, 19, 0xa4),
+ RF_REG_PAIR(4, 20, 0x01),
+ RF_REG_PAIR(4, 21, 0x12),
+ RF_REG_PAIR(4, 22, 0x80),
+ RF_REG_PAIR(4, 23, 0xb3),
+ RF_REG_PAIR(4, 24, 0x00), /* reserved */
+ RF_REG_PAIR(4, 25, 0x00), /* reserved */
+ RF_REG_PAIR(4, 26, 0x00), /* reserved */
+ RF_REG_PAIR(4, 27, 0x00), /* reserved */
+ /* LOGEN */
+ RF_REG_PAIR(4, 28, 0x18),
+ RF_REG_PAIR(4, 29, 0xee),
+ RF_REG_PAIR(4, 30, 0x6b),
+ RF_REG_PAIR(4, 31, 0x31),
+ RF_REG_PAIR(4, 32, 0x5d),
+ RF_REG_PAIR(4, 33, 0x00), /* reserved */
+ /* TX */
+ RF_REG_PAIR(4, 34, 0x96),
+ RF_REG_PAIR(4, 35, 0x55),
+ RF_REG_PAIR(4, 36, 0x08),
+ RF_REG_PAIR(4, 37, 0xbb),
+ RF_REG_PAIR(4, 38, 0xb3),
+ RF_REG_PAIR(4, 39, 0xb3),
+ RF_REG_PAIR(4, 40, 0x03),
+ RF_REG_PAIR(4, 41, 0x00), /* reserved */
+ RF_REG_PAIR(4, 42, 0x00), /* reserved */
+ RF_REG_PAIR(4, 43, 0xc5),
+ RF_REG_PAIR(4, 44, 0xc5),
+ RF_REG_PAIR(4, 45, 0xc5),
+ RF_REG_PAIR(4, 46, 0x07),
+ RF_REG_PAIR(4, 47, 0xa8),
+ RF_REG_PAIR(4, 48, 0xef),
+ RF_REG_PAIR(4, 49, 0x1a),
+ /* PA */
+ RF_REG_PAIR(4, 54, 0x07),
+ RF_REG_PAIR(4, 55, 0xa7),
+ RF_REG_PAIR(4, 56, 0xcc),
+ RF_REG_PAIR(4, 57, 0x14),
+ RF_REG_PAIR(4, 58, 0x07),
+ RF_REG_PAIR(4, 59, 0xa8),
+ RF_REG_PAIR(4, 60, 0xd7),
+ RF_REG_PAIR(4, 61, 0x10),
+ RF_REG_PAIR(4, 62, 0x1c),
+ RF_REG_PAIR(4, 63, 0x00), /* reserved */
+};
+
+static const struct mt76_reg_pair rf_vga[] = {
+ RF_REG_PAIR(5, 0, 0x47),
+ RF_REG_PAIR(5, 1, 0x00),
+ RF_REG_PAIR(5, 2, 0x00),
+ RF_REG_PAIR(5, 3, 0x08),
+ RF_REG_PAIR(5, 4, 0x04),
+ RF_REG_PAIR(5, 5, 0x20),
+ RF_REG_PAIR(5, 6, 0x3a),
+ RF_REG_PAIR(5, 7, 0x3a),
+ RF_REG_PAIR(5, 8, 0x00),
+ RF_REG_PAIR(5, 9, 0x00),
+ RF_REG_PAIR(5, 10, 0x10),
+ RF_REG_PAIR(5, 11, 0x10),
+ RF_REG_PAIR(5, 12, 0x10),
+ RF_REG_PAIR(5, 13, 0x10),
+ RF_REG_PAIR(5, 14, 0x10),
+ RF_REG_PAIR(5, 15, 0x20),
+ RF_REG_PAIR(5, 16, 0x22),
+ RF_REG_PAIR(5, 17, 0x7c),
+ RF_REG_PAIR(5, 18, 0x00),
+ RF_REG_PAIR(5, 19, 0x00),
+ RF_REG_PAIR(5, 20, 0x00),
+ RF_REG_PAIR(5, 21, 0xf1),
+ RF_REG_PAIR(5, 22, 0x11),
+ RF_REG_PAIR(5, 23, 0x02),
+ RF_REG_PAIR(5, 24, 0x41),
+ RF_REG_PAIR(5, 25, 0x20),
+ RF_REG_PAIR(5, 26, 0x00),
+ RF_REG_PAIR(5, 27, 0xd7),
+ RF_REG_PAIR(5, 28, 0xa2),
+ RF_REG_PAIR(5, 29, 0x20),
+ RF_REG_PAIR(5, 30, 0x49),
+ RF_REG_PAIR(5, 31, 0x20),
+ RF_REG_PAIR(5, 32, 0x04),
+ RF_REG_PAIR(5, 33, 0xf1),
+ RF_REG_PAIR(5, 34, 0xa1),
+ RF_REG_PAIR(5, 35, 0x01),
+ RF_REG_PAIR(5, 41, 0x00),
+ RF_REG_PAIR(5, 42, 0x00),
+ RF_REG_PAIR(5, 43, 0x00),
+ RF_REG_PAIR(5, 44, 0x00),
+ RF_REG_PAIR(5, 45, 0x00),
+ RF_REG_PAIR(5, 46, 0x00),
+ RF_REG_PAIR(5, 47, 0x00),
+ RF_REG_PAIR(5, 48, 0x00),
+ RF_REG_PAIR(5, 49, 0x00),
+ RF_REG_PAIR(5, 50, 0x00),
+ RF_REG_PAIR(5, 51, 0x00),
+ RF_REG_PAIR(5, 52, 0x00),
+ RF_REG_PAIR(5, 53, 0x00),
+ RF_REG_PAIR(5, 54, 0x00),
+ RF_REG_PAIR(5, 55, 0x00),
+ RF_REG_PAIR(5, 56, 0x00),
+ RF_REG_PAIR(5, 57, 0x00),
+ RF_REG_PAIR(5, 58, 0x31),
+ RF_REG_PAIR(5, 59, 0x31),
+ RF_REG_PAIR(5, 60, 0x0a),
+ RF_REG_PAIR(5, 61, 0x02),
+ RF_REG_PAIR(5, 62, 0x00),
+ RF_REG_PAIR(5, 63, 0x00),
+};
+
+/* TODO: BBP178 is set to 0xff for "CCK CH14 OBW" which overrides the settings
+ * from channel switching. Seems stupid at best.
+ */
+static const struct mt76_reg_pair bbp_high_temp[] = {
+ { 75, 0x60 },
+ { 92, 0x02 },
+ { 178, 0xff }, /* For CCK CH14 OBW */
+ { 195, 0x88 }, { 196, 0x60 },
+}, bbp_high_temp_bw20[] = {
+ { 69, 0x12 },
+ { 91, 0x07 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x83 }, { 196, 0x17 },
+}, bbp_high_temp_bw40[] = {
+ { 69, 0x15 },
+ { 91, 0x04 },
+ { 195, 0x23 }, { 196, 0x12 },
+ { 195, 0x24 }, { 196, 0x08 },
+ { 195, 0x81 }, { 196, 0x15 },
+ { 195, 0x83 }, { 196, 0x16 },
+}, bbp_low_temp[] = {
+ { 178, 0xff }, /* For CCK CH14 OBW */
+}, bbp_low_temp_bw20[] = {
+ { 69, 0x12 },
+ { 75, 0x5e },
+ { 91, 0x07 },
+ { 92, 0x02 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x83 }, { 196, 0x17 },
+ { 195, 0x88 }, { 196, 0x5e },
+}, bbp_low_temp_bw40[] = {
+ { 69, 0x15 },
+ { 75, 0x5c },
+ { 91, 0x04 },
+ { 92, 0x03 },
+ { 195, 0x23 }, { 196, 0x10 },
+ { 195, 0x24 }, { 196, 0x08 },
+ { 195, 0x81 }, { 196, 0x15 },
+ { 195, 0x83 }, { 196, 0x16 },
+ { 195, 0x88 }, { 196, 0x5b },
+}, bbp_normal_temp[] = {
+ { 75, 0x60 },
+ { 92, 0x02 },
+ { 178, 0xff }, /* For CCK CH14 OBW */
+ { 195, 0x88 }, { 196, 0x60 },
+}, bbp_normal_temp_bw20[] = {
+ { 69, 0x12 },
+ { 91, 0x07 },
+ { 195, 0x23 }, { 196, 0x17 },
+ { 195, 0x24 }, { 196, 0x06 },
+ { 195, 0x81 }, { 196, 0x12 },
+ { 195, 0x83 }, { 196, 0x17 },
+}, bbp_normal_temp_bw40[] = {
+ { 69, 0x15 },
+ { 91, 0x04 },
+ { 195, 0x23 }, { 196, 0x12 },
+ { 195, 0x24 }, { 196, 0x08 },
+ { 195, 0x81 }, { 196, 0x15 },
+ { 195, 0x83 }, { 196, 0x16 },
+};
+
+#define BBP_TABLE(arr) { arr, ARRAY_SIZE(arr), }
+
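+/* Indexing convention (as used by the PHY code): the first index appears
+ * to select the temperature regime (0 normal, 1 high, 2 low) and the
+ * second the register set - BW20, BW40, or the bandwidth independent
+ * values applied in either case.
+ */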
+static const struct reg_table {
+ const struct mt76_reg_pair *regs;
+ size_t n;
+} bbp_mode_table[3][3] = {
+ {
+ BBP_TABLE(bbp_normal_temp_bw20),
+ BBP_TABLE(bbp_normal_temp_bw40),
+ BBP_TABLE(bbp_normal_temp),
+ }, {
+ BBP_TABLE(bbp_high_temp_bw20),
+ BBP_TABLE(bbp_high_temp_bw40),
+ BBP_TABLE(bbp_high_temp),
+ }, {
+ BBP_TABLE(bbp_low_temp_bw20),
+ BBP_TABLE(bbp_low_temp_bw40),
+ BBP_TABLE(bbp_low_temp),
+ }
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
new file mode 100644
index 000000000000..c161bcc6a7fa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -0,0 +1,569 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+#include <linux/etherdevice.h>
+
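+/* Map a hardware rate word back to a mac80211 rate table index, e.g. an
+ * OFDM word with MCS 0 becomes txrate->idx 4 (the 6 Mbps entry of
+ * mt76_rates in init.c) and a CCK word with MCS 11 (the short preamble
+ * encoding of 11 Mbps) becomes txrate->idx 3.
+ */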
+static void
+mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
+{
+ u8 idx = MT76_GET(MT_TXWI_RATE_MCS, rate);
+
+ txrate->idx = 0;
+ txrate->flags = 0;
+ txrate->count = 1;
+
+ switch (MT76_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ txrate->idx = idx + 4;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8)
+ idx -= 8;
+
+ txrate->idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ txrate->flags |= IEEE80211_TX_RC_MCS;
+ txrate->idx = idx;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (MT76_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
+ txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+ if (rate & MT_TXWI_RATE_SGI)
+ txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
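+/* The hardware only reports the final rate and the retry count, so the
+ * intermediate fallback steps are inferred, e.g. st->retry == 2 with a
+ * reported rate of idx 3 is rebuilt as the sequence idx 5 -> 4 -> 3,
+ * one attempt each.
+ */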
+static void
+mt76_mac_fill_tx_status(struct mt7601u_dev *dev, struct ieee80211_tx_info *info,
+ struct mt76_tx_status *st)
+{
+ struct ieee80211_tx_rate *rate = info->status.rates;
+ int cur_idx, last_rate;
+ int i;
+
+ last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+ mt76_mac_process_tx_rate(&rate[last_rate], st->rate);
+ if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+ rate[last_rate + 1].idx = -1;
+
+ cur_idx = rate[last_rate].idx + st->retry;
+ for (i = 0; i <= last_rate; i++) {
+ rate[i].flags = rate[last_rate].flags;
+ rate[i].idx = max_t(int, 0, cur_idx - i);
+ rate[i].count = 1;
+ }
+
+ if (last_rate > 0)
+ rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = st->success;
+
+ if (st->is_probe)
+ info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+ if (st->aggr)
+ info->flags |= IEEE80211_TX_CTL_AMPDU |
+ IEEE80211_TX_STAT_AMPDU;
+
+ if (!st->ack_req)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ else if (st->success)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
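+/* Packing example: HT MCS 7 at 40 MHz with a short GI yields
+ * rateval == 0x8187 (MCS 7, BW and SGI bits set, PHY == MT_PHY_TYPE_HT)
+ * with *nss_val == 1, since HT MCS 0-7 are single stream.
+ */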
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+ u16 rateval;
+ u8 phy, rate_idx;
+ u8 nss = 1;
+ u8 bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->chandef.chan->band;
+ u16 val;
+
+ r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ bw = 0;
+ }
+
+ rateval = MT76_SET(MT_RXWI_RATE_MCS, rate_idx);
+ rateval |= MT76_SET(MT_RXWI_RATE_PHY, phy);
+ rateval |= MT76_SET(MT_RXWI_RATE_BW, bw);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rateval |= MT_RXWI_RATE_SGI;
+
+ *nss_val = nss;
+ return rateval;
+}
+
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ wcid->tx_rate = mt76_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+ wcid->tx_rate_set = true;
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev)
+{
+ struct mt76_tx_status stat = {};
+ u32 val;
+
+ val = mt7601u_rr(dev, MT_TX_STAT_FIFO);
+ stat.valid = !!(val & MT_TX_STAT_FIFO_VALID);
+ stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS);
+ stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR);
+ stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ);
+ stat.pktid = MT76_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
+ stat.wcid = MT76_GET(MT_TX_STAT_FIFO_WCID, val);
+ stat.rate = MT76_GET(MT_TX_STAT_FIFO_RATE, val);
+
+ return stat;
+}
+
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ void *msta;
+
+ rcu_read_lock();
+ if (stat->wcid < ARRAY_SIZE(dev->wcid))
+ wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+ if (wcid) {
+ msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(msta, struct ieee80211_sta,
+ drv_priv);
+ }
+
+ mt76_mac_fill_tx_status(dev, &info, stat);
+ ieee80211_tx_status_noskb(dev->hw, sta, &info);
+ rcu_read_unlock();
+}
+
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+ int ht_mode)
+{
+ int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ u32 prot[6];
+ bool ht_rts[4] = {};
+ int i;
+
+ prot[0] = MT_PROT_NAV_SHORT |
+ MT_PROT_TXOP_ALLOW_ALL |
+ MT_PROT_RTS_THR_EN;
+ prot[1] = prot[0];
+ if (legacy_prot)
+ prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+ prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
+ prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
+
+ if (legacy_prot) {
+ prot[2] |= MT_PROT_RATE_CCK_11;
+ prot[3] |= MT_PROT_RATE_CCK_11;
+ prot[4] |= MT_PROT_RATE_CCK_11;
+ prot[5] |= MT_PROT_RATE_CCK_11;
+ } else {
+ prot[2] |= MT_PROT_RATE_OFDM_24;
+ prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+ prot[4] |= MT_PROT_RATE_OFDM_24;
+ prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+ }
+
+ switch (mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ ht_rts[1] = ht_rts[3] = true;
+ break;
+
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+ break;
+ }
+
+ if (non_gf)
+ ht_rts[2] = ht_rts[3] = true;
+
+ for (i = 0; i < 4; i++)
+ if (ht_rts[i])
+ prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
+
+ for (i = 0; i < 6; i++)
+ mt7601u_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+}
+
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb)
+{
+ if (short_preamb)
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+ else
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval)
+{
+ u32 val = mt7601u_rr(dev, MT_BEACON_TIME_CFG);
+
+ val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN);
+
+ if (!enable) {
+ mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
+ return;
+ }
+
+	val &= ~MT_BEACON_TIME_CFG_INTVAL;
+	val |= MT76_SET(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+	       MT_BEACON_TIME_CFG_TIMER_EN |
+	       MT_BEACON_TIME_CFG_SYNC_MODE |
+	       MT_BEACON_TIME_CFG_TBTT_EN;
+
+	mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
+}
+
+static void mt7601u_check_mac_err(struct mt7601u_dev *dev)
+{
+ u32 val = mt7601u_rr(dev, 0x10f4);
+
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+
+ dev_err(dev->dev, "Error: MAC specific condition occurred\n");
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+ udelay(10);
+ mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+}
+
+void mt7601u_mac_work(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ mac_work.work);
+ struct {
+ u32 addr_base;
+ u32 span;
+ u64 *stat_base;
+ } spans[] = {
+ { MT_RX_STA_CNT0, 3, dev->stats.rx_stat },
+ { MT_TX_STA_CNT0, 3, dev->stats.tx_stat },
+ { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
+ { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
+ { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
+ { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] },
+ };
+ u32 sum, n;
+ int i, j, k;
+
+	/* Note: using MCU_RANDOM_READ is actually slower than reading all the
+	 *	 registers by hand.  The MCU takes ca. 20ms to complete a read
+	 *	 of 24 registers while reading them one by one takes roughly
+	 *	 24 * 200us =~ 5ms.
+	 */
+
+ k = 0;
+ n = 0;
+ sum = 0;
+ for (i = 0; i < ARRAY_SIZE(spans); i++)
+ for (j = 0; j < spans[i].span; j++) {
+ u32 val = mt7601u_rr(dev, spans[i].addr_base + j * 4);
+
+ spans[i].stat_base[j * 2] += val & 0xffff;
+ spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+ /* Calculate average AMPDU length */
+ if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+ spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+ continue;
+
+ n += (val >> 16) + (val & 0xffff);
+ sum += (val & 0xffff) * (1 + k * 2) +
+ (val >> 16) * (2 + k * 2);
+ k++;
+ }
+
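+	/* E.g. an aggregation counter of 0x00020004 means 4 AMPDUs of
+	 * length 1 (low half-word) and 2 of length 2 (high half-word),
+	 * giving an average length of DIV_ROUND_CLOSEST(8, 6) == 1.
+	 */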
+ atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+ mt7601u_check_mac_err(dev);
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, 10 * HZ);
+}
+
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+ u8 zmac[ETH_ALEN] = {};
+ u32 attr;
+
+ attr = MT76_SET(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ MT76_SET(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+ mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+ if (mac)
+ memcpy(zmac, mac, sizeof(zmac));
+
+ mt7601u_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
+{
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+ void *msta;
+ u8 min_factor = 3;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+ wcid = rcu_dereference(dev->wcid[i]);
+ if (!wcid)
+ continue;
+
+ msta = container_of(wcid, struct mt76_sta, wcid);
+ sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+ min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+ }
+ rcu_read_unlock();
+
+ mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+ MT76_SET(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+ u8 idx = MT76_GET(MT_RXWI_RATE_MCS, rate);
+
+ switch (MT76_GET(MT_RXWI_RATE_PHY, rate)) {
+ case MT_PHY_TYPE_OFDM:
+ if (WARN_ON(idx >= 8))
+ idx = 0;
+ idx += 4;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_CCK:
+ if (idx >= 8) {
+ idx -= 8;
+ status->flag |= RX_FLAG_SHORTPRE;
+ }
+
+ if (WARN_ON(idx >= 4))
+ idx = 0;
+
+ status->rate_idx = idx;
+ return;
+ case MT_PHY_TYPE_HT_GF:
+ status->flag |= RX_FLAG_HT_GF;
+ /* fall through */
+ case MT_PHY_TYPE_HT:
+ status->flag |= RX_FLAG_HT;
+ status->rate_idx = idx;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (rate & MT_RXWI_RATE_SGI)
+ status->flag |= RX_FLAG_SHORT_GI;
+
+ if (rate & MT_RXWI_RATE_STBC)
+ status->flag |= 1 << RX_FLAG_STBC_SHIFT;
+
+ if (rate & MT_RXWI_RATE_BW)
+ status->flag |= RX_FLAG_40MHZ;
+}
+
+static void
+mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+ u16 rate, int rssi)
+{
+ dev->bcn_freq_off = rxwi->freq_off;
+ dev->bcn_phy_mode = MT76_GET(MT_RXWI_RATE_PHY, rate);
+ dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
+}
+
+static int
+mt7601u_rx_is_our_beacon(struct mt7601u_dev *dev, u8 *data)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
+
+ return ieee80211_is_beacon(hdr->frame_control) &&
+ ether_addr_equal(hdr->addr2, dev->ap_bssid);
+}
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi)
+{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct mt7601u_rxwi *rxwi = rxi;
+ u32 ctl = le32_to_cpu(rxwi->ctl);
+ u16 rate = le16_to_cpu(rxwi->rate);
+ int rssi;
+
+ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+ }
+
+ status->chains = BIT(0);
+ rssi = mt7601u_phy_get_rssi(dev, rxwi, rate);
+ status->chain_signal[0] = status->signal = rssi;
+ status->freq = dev->chandef.chan->center_freq;
+ status->band = dev->chandef.chan->band;
+
+ mt76_mac_process_rate(status, rate);
+
+ spin_lock_bh(&dev->con_mon_lock);
+ if (mt7601u_rx_is_our_beacon(dev, data))
+ mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi);
+ else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M))
+ dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ return MT76_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+}
+
+static enum mt76_cipher_type
+mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ memset(key_data, 0, 32);
+ if (!key)
+ return MT_CIPHER_NONE;
+
+ if (key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u8 iv_data[8];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_set_key(dev, idx);
+
+ mt7601u_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+ memset(iv_data, 0, sizeof(iv_data));
+ if (key) {
+ iv_data[3] = key->keyidx << 6;
+ if (cipher >= MT_CIPHER_TKIP) {
+ /* Note: start with 1 to comply with spec,
+ * (see comment on common/cmm_wpa.c:4291).
+ */
+ iv_data[0] |= 1;
+ iv_data[3] |= 0x20;
+ }
+ }
+ mt7601u_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+ val = mt7601u_rr(dev, MT_WCID_ATTR(idx));
+ val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
+ val |= MT76_SET(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+ MT76_SET(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+ val &= ~MT_WCID_ATTR_PAIRWISE;
+ val |= MT_WCID_ATTR_PAIRWISE *
+ !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ mt7601u_wr(dev, MT_WCID_ATTR(idx), val);
+
+ return 0;
+}
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key)
+{
+ enum mt76_cipher_type cipher;
+ u8 key_data[32];
+ u32 val;
+
+ cipher = mt76_mac_get_key_info(key, key_data);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EINVAL;
+
+ trace_set_shared_key(dev, vif_idx, key_idx);
+
+ mt7601u_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
+ key_data, sizeof(key_data));
+
+ val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+ val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+ val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+ mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.h b/drivers/net/wireless/mediatek/mt7601u/mac.h
new file mode 100644
index 000000000000..2c22d63c63a2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_MAC_H
+#define __MT76_MAC_H
+
+struct mt76_tx_status {
+ u8 valid:1;
+ u8 success:1;
+ u8 aggr:1;
+ u8 ack_req:1;
+ u8 is_probe:1;
+ u8 wcid;
+ u8 pktid;
+ u8 retry;
+ u16 rate;
+} __packed __aligned(2);
+
+/* Note: values in original "RSSI" and "SNR" fields are not actually what they
+ * are called for MT7601U, names used by this driver are educated guesses
+ * (see vendor mac/ral_omac.c).
+ */
+struct mt7601u_rxwi {
+ __le32 rxinfo;
+
+ __le32 ctl;
+
+ __le16 frag_sn;
+ __le16 rate;
+
+ u8 unknown;
+ u8 zero[3];
+
+ u8 snr;
+ u8 ant;
+ u8 gain;
+ u8 freq_off;
+
+ __le32 resv2;
+ __le32 expert_ant;
+} __packed __aligned(4);
+
+#define MT_RXINFO_BA BIT(0)
+#define MT_RXINFO_DATA BIT(1)
+#define MT_RXINFO_NULL BIT(2)
+#define MT_RXINFO_FRAG BIT(3)
+#define MT_RXINFO_U2M BIT(4)
+#define MT_RXINFO_MULTICAST BIT(5)
+#define MT_RXINFO_BROADCAST BIT(6)
+#define MT_RXINFO_MYBSS BIT(7)
+#define MT_RXINFO_CRCERR BIT(8)
+#define MT_RXINFO_ICVERR BIT(9)
+#define MT_RXINFO_MICERR BIT(10)
+#define MT_RXINFO_AMSDU BIT(11)
+#define MT_RXINFO_HTC BIT(12)
+#define MT_RXINFO_RSSI BIT(13)
+#define MT_RXINFO_L2PAD BIT(14)
+#define MT_RXINFO_AMPDU BIT(15)
+#define MT_RXINFO_DECRYPT BIT(16)
+#define MT_RXINFO_BSSIDX3 BIT(17)
+#define MT_RXINFO_WAPI_KEY BIT(18)
+#define MT_RXINFO_PN_LEN GENMASK(21, 19)
+#define MT_RXINFO_SW_PKT_80211 BIT(22)
+#define MT_RXINFO_TCP_SUM_BYPASS BIT(28)
+#define MT_RXINFO_IP_SUM_BYPASS BIT(29)
+#define MT_RXINFO_TCP_SUM_ERR BIT(30)
+#define MT_RXINFO_IP_SUM_ERR BIT(31)
+
+#define MT_RXWI_CTL_WCID GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16)
+#define MT_RXWI_CTL_TID GENMASK(31, 28)
+
+#define MT_RXWI_FRAG GENMASK(3, 0)
+#define MT_RXWI_SN GENMASK(15, 4)
+
+#define MT_RXWI_RATE_MCS GENMASK(6, 0)
+#define MT_RXWI_RATE_BW BIT(7)
+#define MT_RXWI_RATE_SGI BIT(8)
+#define MT_RXWI_RATE_STBC GENMASK(10, 9)
+#define MT_RXWI_RATE_ETXBF BIT(11)
+#define MT_RXWI_RATE_SND BIT(12)
+#define MT_RXWI_RATE_ITXBF BIT(13)
+#define MT_RXWI_RATE_PHY GENMASK(15, 14)
+
+#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0)
+#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6)
+#define MT_RXWI_ANT_AUX_LNA BIT(7)
+
+#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0)
+
+enum mt76_phy_type {
+ MT_PHY_TYPE_CCK,
+ MT_PHY_TYPE_OFDM,
+ MT_PHY_TYPE_HT,
+ MT_PHY_TYPE_HT_GF,
+};
+
+enum mt76_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+};
+
+struct mt76_txwi {
+ __le16 flags;
+ __le16 rate_ctl;
+
+ u8 ack_ctl;
+ u8 wcid;
+ __le16 len_ctl;
+
+ __le32 iv;
+
+ __le32 eiv;
+
+ u8 aid;
+ u8 txstream;
+ __le16 ctl;
+} __packed __aligned(4);
+
+#define MT_TXWI_FLAGS_FRAG BIT(0)
+#define MT_TXWI_FLAGS_MMPS BIT(1)
+#define MT_TXWI_FLAGS_CFACK BIT(2)
+#define MT_TXWI_FLAGS_TS BIT(3)
+#define MT_TXWI_FLAGS_AMPDU BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
+#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10)
+#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
+#define MT_TXWI_FLAGS_TX_RPT BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
+
+#define MT_TXWI_RATE_MCS GENMASK(6, 0)
+#define MT_TXWI_RATE_BW BIT(7)
+#define MT_TXWI_RATE_SGI BIT(8)
+#define MT_TXWI_RATE_STBC GENMASK(10, 9)
+#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14)
+
+#define MT_TXWI_ACK_CTL_REQ BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
+
+#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0)
+#define MT_TXWI_LEN_PKTID GENMASK(15, 12)
+
+#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0)
+#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4)
+#define MT_TXWI_CTL_PIFS_REV BIT(6)
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ u8 *data, void *rxi);
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+ struct ieee80211_key_conf *key);
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+ const struct ieee80211_tx_rate *rate);
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+ struct ieee80211_key_conf *key);
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+ const struct ieee80211_tx_rate *rate, u8 *nss_val);
+struct mt76_tx_status
+mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev);
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
new file mode 100644
index 000000000000..ced82abb414f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "mac.h"
+#include <linux/etherdevice.h>
+#include <linux/version.h>
+
+static int mt7601u_start(struct ieee80211_hw *hw)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ int ret;
+
+ mutex_lock(&dev->mutex);
+
+ ret = mt7601u_mac_start(dev);
+ if (ret)
+ goto out;
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+out:
+ mutex_unlock(&dev->mutex);
+ return ret;
+}
+
+static void mt7601u_stop(struct ieee80211_hw *hw)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
+ mt7601u_mac_stop(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static int mt7601u_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int idx = 0;
+ unsigned int wcid = GROUP_WCID(idx);
+
+ /* Note: for AP do the AP-STA things mt76 does:
+ * - beacon offsets
+ * - do mac address tricks
+ * - shift vif idx
+ */
+ mvif->idx = idx;
+
+ if (dev->wcid_mask[wcid / BITS_PER_LONG] & BIT(wcid % BITS_PER_LONG))
+ return -ENOSPC;
+ dev->wcid_mask[wcid / BITS_PER_LONG] |= BIT(wcid % BITS_PER_LONG);
+ mvif->group_wcid.idx = wcid;
+ mvif->group_wcid.hw_key_idx = -1;
+
+ return 0;
+}
+
+static void mt7601u_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ unsigned int wcid = mvif->group_wcid.idx;
+
+ dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+}
+
+static int mt7601u_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ieee80211_stop_queues(hw);
+ ret = mt7601u_phy_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+ }
+
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static void
+mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ u32 flags = 0;
+
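+ /* A set FIF_* bit means mac80211 wants those frames passed up, while a
+ * set MT_RX_FILTR_CFG_* bit makes the hardware drop them - hence the
+ * negation when programming the register.
+ */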
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->rxfilter &= ~(_hw); \
+ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mutex);
+
+ dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+ MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+ MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+ MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+ MT_RX_FILTR_CFG_CTS |
+ MT_RX_FILTR_CFG_CFEND |
+ MT_RX_FILTR_CFG_CFACK |
+ MT_RX_FILTR_CFG_BA |
+ MT_RX_FILTR_CFG_CTRL_RSV);
+ MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+ mutex_unlock(&dev->mutex);
+}
+
+static void
+mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt7601u_phy_con_cal_onoff(dev, info);
+
+ if (changed & BSS_CHANGED_BSSID) {
+ mt7601u_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
+
+ /* Note: this is a hack because beacon_int is not changed
+ * on leave nor is any more appropriate event generated.
+ * rt2x00 doesn't seem to be bothered though.
+ */
+ if (is_zero_ether_addr(info->bssid))
+ mt7601u_mac_config_tsf(dev, false, 0);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ mt7601u_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
+ mt7601u_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
+ mt7601u_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+ mt7601u_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
+ mt7601u_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT)
+ mt7601u_mac_config_tsf(dev, true, info->beacon_int);
+
+ if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt7601u_mac_set_protection(dev, info->use_cts_prot,
+ info->ht_operation_mode);
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE)
+ mt7601u_mac_set_short_preamble(dev, info->use_short_preamble);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+ MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC)
+ mt7601u_phy_recalibrate_after_assoc(dev);
+
+ mutex_unlock(&dev->mutex);
+}
+
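+/* Find and claim the lowest free WCID in the bitmap. Indexes above 119 are
+ * rejected - the group WCIDs live at the top of the table.
+ */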
+static int
+mt76_wcid_alloc(struct mt7601u_dev *dev)
+{
+ int i, idx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+ idx = ffs(~dev->wcid_mask[i]);
+ if (!idx)
+ continue;
+
+ idx--;
+ dev->wcid_mask[i] |= BIT(idx);
+ break;
+ }
+
+ idx = i * BITS_PER_LONG + idx;
+ if (idx > 119)
+ return -1;
+
+ return idx;
+}
+
+static int
+mt7601u_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ int ret = 0;
+ int idx = 0;
+
+ mutex_lock(&dev->mutex);
+
+ idx = mt76_wcid_alloc(dev);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ msta->wcid.idx = idx;
+ msta->wcid.hw_key_idx = -1;
+ mt7601u_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+ mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+ mt7601u_mac_set_ampdu_factor(dev);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static int
+mt7601u_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ int idx = msta->wcid.idx;
+
+ mutex_lock(&dev->mutex);
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+ dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+ mt7601u_mac_wcid_setup(dev, idx, 0, NULL);
+ mt7601u_mac_set_ampdu_factor(dev);
+ mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
+static void
+mt7601u_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+}
+
+static void
+mt7601u_sw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mt7601u_agc_save(dev);
+ set_bit(MT7601U_STATE_SCANNING, &dev->state);
+}
+
+static void
+mt7601u_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mt7601u_agc_restore(dev);
+ clear_bit(MT7601U_STATE_SCANNING, &dev->state);
+}
+
+static int
+mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+ struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
+ struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
+ int idx = key->keyidx;
+ int ret;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else {
+ if (idx == wcid->hw_key_idx)
+ wcid->hw_key_idx = -1;
+
+ key = NULL;
+ }
+
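+ /* Without a station this is a group key - program it into the
+ * per-VIF shared key table (and the group WCID, when it is the
+ * currently active key).
+ */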
+ if (!msta) {
+ if (key || wcid->hw_key_idx == idx) {
+ ret = mt76_mac_wcid_set_key(dev, wcid->idx, key);
+ if (ret)
+ return ret;
+ }
+
+ return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key);
+ }
+
+ return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct mt7601u_dev *dev = hw->priv;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
+
+ return 0;
+}
+
+static int
+mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+
+ WARN_ON(msta->wcid.idx > GROUP_WCID(0));
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+ BIT(16 + tid));
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ break;
+ case IEEE80211_AMPDU_TX_START:
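+ /* ieee80211_send_bar() expects the SSN in sequence-control
+ * format, so keep it pre-shifted by 4.
+ */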
+ msta->agg_ssn[tid] = *ssn << 4;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+ struct ieee80211_sta_rates *rates;
+ struct ieee80211_tx_rate rate = {};
+
+ rcu_read_lock();
+ rates = rcu_dereference(sta->rates);
+
+ if (!rates)
+ goto out;
+
+ rate.idx = rates->rate[0].idx;
+ rate.flags = rates->rate[0].flags;
+ mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+
+out:
+ rcu_read_unlock();
+}
+
+const struct ieee80211_ops mt7601u_ops = {
+ .tx = mt7601u_tx,
+ .start = mt7601u_start,
+ .stop = mt7601u_stop,
+ .add_interface = mt7601u_add_interface,
+ .remove_interface = mt7601u_remove_interface,
+ .config = mt7601u_config,
+ .configure_filter = mt76_configure_filter,
+ .bss_info_changed = mt7601u_bss_info_changed,
+ .sta_add = mt7601u_sta_add,
+ .sta_remove = mt7601u_sta_remove,
+ .sta_notify = mt7601u_sta_notify,
+ .set_key = mt7601u_set_key,
+ .conf_tx = mt7601u_conf_tx,
+ .sw_scan_start = mt7601u_sw_scan,
+ .sw_scan_complete = mt7601u_sw_scan_complete,
+ .ampdu_action = mt76_ampdu_action,
+ .sta_rate_tbl_update = mt76_sta_rate_tbl_update,
+ .set_rts_threshold = mt7601u_set_rts_threshold,
+};
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
new file mode 100644
index 000000000000..fbb1986eda3c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -0,0 +1,534 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "mcu.h"
+#include "usb.h"
+#include "trace.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD 0x3800
+#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MCU_RESP_URB_SIZE 1024
+
+static inline int firmware_running(struct mt7601u_dev *dev)
+{
+ return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+ put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
+ u8 seq, enum mcu_cmd cmd)
+{
+ WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
+ MT76_SET(MT_TXD_CMD_INFO_SEQ, seq) |
+ MT76_SET(MT_TXD_CMD_INFO_TYPE, cmd)));
+}
+
+static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
+ struct sk_buff *skb, bool need_resp)
+{
+ u32 i, csum = 0;
+
+ for (i = 0; i < skb->len / 4; i++)
+ csum ^= get_unaligned_le32(skb->data + i * 4);
+
+ trace_mt_mcu_msg_send(dev, skb, csum, need_resp);
+}
+
+static struct sk_buff *
+mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len)
+{
+ struct sk_buff *skb;
+
+ WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
+
+ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ memcpy(skb_put(skb, len), data, len);
+
+ return skb;
+}
+
+static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
+{
+ struct urb *urb = dev->mcu.resp.urb;
+ u32 rxfce;
+ int urb_status, ret, i = 5;
+
+ while (i--) {
+ if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
+ msecs_to_jiffies(300))) {
+ dev_warn(dev->dev, "Warning: %s retrying\n", __func__);
+ continue;
+ }
+
+ /* Make copies of important data before reusing the urb */
+ rxfce = get_unaligned_le32(dev->mcu.resp.buf);
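+ /* Non-zero only when the URB actually failed. */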
+ urb_status = urb->status * mt7601u_urb_has_error(urb);
+
+ ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt7601u_complete_urb,
+ &dev->mcu.resp_cmpl);
+ if (ret)
+ return ret;
+
+ if (urb_status)
+ dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
+ urb_status);
+
+ if (MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+ MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+ return 0;
+
+ dev_err(dev->dev, "Error: MCU resp evt:%hhx seq:%hhx-%hhx!\n",
+ MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+ seq, MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+ }
+
+ dev_err(dev->dev, "Error: %s timed out\n", __func__);
+ return -ETIMEDOUT;
+}
+
+static int
+mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
+ enum mcu_cmd cmd, bool wait_resp)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ unsigned int cmd_pipe = usb_sndbulkpipe(usb_dev,
+ dev->out_eps[MT_EP_OUT_INBAND_CMD]);
+ int sent, ret;
+ u8 seq = 0;
+
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return 0;
+
+ mutex_lock(&dev->mcu.mutex);
+
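+ /* Sequence number 0 is reserved for commands which don't expect
+ * a response, so the counter skips it.
+ */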
+ if (wait_resp)
+ while (!seq)
+ seq = ++dev->mcu.msg_seq & 0xf;
+
+ mt7601u_dma_skb_wrap_cmd(skb, seq, cmd);
+
+ if (dev->mcu.resp_cmpl.done)
+ dev_err(dev->dev, "Error: MCU response pre-completed!\n");
+
+ trace_mt_mcu_msg_send_cs(dev, skb, wait_resp);
+ trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len);
+ ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
+ if (ret) {
+ dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret);
+ goto out;
+ }
+ if (sent != skb->len)
+ dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__);
+
+ if (wait_resp)
+ ret = mt7601u_mcu_wait_resp(dev, seq);
+out:
+ mutex_unlock(&dev->mcu.mutex);
+
+ consume_skb(skb);
+
+ return ret;
+}
+
+static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
+ enum mcu_function func, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(func),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ /* Wait for a response only for ATOMIC_TSSI_SETTING (5). */
+ return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP,
+ func == ATOMIC_TSSI_SETTING);
+}
+
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga)
+{
+ int ret;
+
+ if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state))
+ return 0;
+
+ ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING,
+ use_hvga);
+ if (ret) {
+ dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n");
+ return ret;
+ }
+
+ dev->tssi_read_trig = true;
+
+ return 0;
+}
+
+int
+mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
+{
+ struct sk_buff *skb;
+ struct {
+ __le32 id;
+ __le32 value;
+ } __packed __aligned(4) msg = {
+ .id = cpu_to_le32(cal),
+ .value = cpu_to_le32(val),
+ };
+
+ skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+ if (!skb)
+ return -ENOMEM;
+ return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
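+ /* Each register pair takes 8 bytes on the wire: 4 for the
+ * address, 4 for the value.
+ */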
+ const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_vals_per_cmd, n);
+
+ skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ for (i = 0; i < cnt; i++) {
+ skb_put_le32(skb, base + data[i].reg);
+ skb_put_le32(skb, data[i].value);
+ }
+
+ ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt);
+}
+
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+ const u32 *data, int n)
+{
+ const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+ struct sk_buff *skb;
+ int cnt, i, ret;
+
+ if (!n)
+ return 0;
+
+ cnt = min(max_regs_per_cmd, n);
+
+ skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+
+ skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
+ for (i = 0; i < cnt; i++)
+ skb_put_le32(skb, data[i]);
+
+ ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
+ if (ret)
+ return ret;
+
+ return mt7601u_burst_write_regs(dev, offset + cnt * 4,
+ data + cnt, n - cnt);
+}
+
+struct mt76_fw_header {
+ __le32 ilm_len;
+ __le32 dlm_len;
+ __le16 build_ver;
+ __le16 fw_ver;
+ u8 pad[4];
+ char build_time[16];
+};
+
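+/* Firmware image layout: the first MT_MCU_IVB_SIZE bytes of the ILM are kept
+ * separately in @ivb; the remainder of the ILM is followed directly by the
+ * DLM in @ilm.
+ */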
+struct mt76_fw {
+ struct mt76_fw_header hdr;
+ u8 ivb[MT_MCU_IVB_SIZE];
+ u8 ilm[];
+};
+
+static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
+ const struct mt7601u_dma_buf *dma_buf,
+ const void *data, u32 len, u32 dst_addr)
+{
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ struct mt7601u_dma_buf buf = *dma_buf; /* we need to fake length */
+ __le32 reg;
+ u32 val;
+ int ret;
+
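+ /* Each chunk is sent as a single DMA packet: TXD info word,
+ * payload, and zero padding at the tail.
+ */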
+ reg = cpu_to_le32(MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET) |
+ MT76_SET(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+ MT76_SET(MT_TXD_INFO_LEN, len));
+ memcpy(buf.buf, &reg, sizeof(reg));
+ memcpy(buf.buf + sizeof(reg), data, len);
+ memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+ ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_ADDR, dst_addr);
+ if (ret)
+ return ret;
+ len = roundup(len, 4);
+ ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+ MT_FCE_DMA_LEN, len << 16);
+ if (ret)
+ return ret;
+
+ buf.len = MT_DMA_HDR_LEN + len + 4;
+ ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+ &buf, GFP_KERNEL,
+ mt7601u_complete_urb, &cmpl);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+ dev_err(dev->dev, "Error: firmware upload timed out\n");
+ usb_kill_urb(buf.urb);
+ return -ETIMEDOUT;
+ }
+ if (mt7601u_urb_has_error(buf.urb)) {
+ dev_err(dev->dev, "Error: firmware upload urb failed:%d\n",
+ buf.urb->status);
+ return buf.urb->status;
+ }
+
+ val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+ val++;
+ mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+ return 0;
+}
+
+static int
+mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf,
+ const void *data, int len, u32 dst_addr)
+{
+ int n, ret;
+
+ if (len == 0)
+ return 0;
+
+ n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+ ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr);
+ if (ret)
+ return ret;
+
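+ /* Wait for the MCU to signal (via MT_MCU_COM_REG1) that the chunk
+ * was consumed before uploading the next one.
+ */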
+ if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+ return -ETIMEDOUT;
+
+ return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw)
+{
+ struct mt7601u_dma_buf dma_buf;
+ void *ivb;
+ u32 ilm_len, dlm_len;
+ int i, ret;
+
+ ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+ if (!ivb)
+ return -ENOMEM;
+ if (mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+ /* Don't take the error path - dma_buf is uninitialized
+ * and must not be passed to mt7601u_usb_free_buf().
+ */
+ kfree(ivb);
+ return -ENOMEM;
+ }
+
+ ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+ dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n",
+ ilm_len, sizeof(fw->ivb));
+ ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+ if (ret)
+ goto error;
+
+ dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+ dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len);
+ ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+ dlm_len, MT_MCU_DLM_OFFSET);
+ if (ret)
+ goto error;
+
+ ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ 0x12, 0, ivb, sizeof(fw->ivb));
+ if (ret < 0)
+ goto error;
+ ret = 0;
+
+ for (i = 100; i && !firmware_running(dev); i--)
+ msleep(10);
+ if (!i) {
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+
+ dev_dbg(dev->dev, "Firmware running!\n");
+error:
+ kfree(ivb);
+ mt7601u_usb_free_buf(dev, &dma_buf);
+
+ return ret;
+}
+
+static int mt7601u_load_firmware(struct mt7601u_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt76_fw_header *hdr;
+ int len, ret;
+ u32 val;
+
+ mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+
+ if (firmware_running(dev))
+ return 0;
+
+ ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr))
+ goto err_inv_fw;
+
+ hdr = (const struct mt76_fw_header *) fw->data;
+
+ if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+ goto err_inv_fw;
+
+ len = sizeof(*hdr);
+ len += le32_to_cpu(hdr->ilm_len);
+ len += le32_to_cpu(hdr->dlm_len);
+
+ if (fw->size != len)
+ goto err_inv_fw;
+
+ val = le16_to_cpu(hdr->fw_ver);
+ dev_info(dev->dev,
+ "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+ (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+ le16_to_cpu(hdr->build_ver), hdr->build_time);
+
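+ /* Vendor-style init sequence; the raw register offsets below are
+ * not named in the driver headers.
+ */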
+ mt7601u_wr(dev, 0x94c, 0);
+ mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0);
+
+ mt7601u_vendor_reset(dev);
+ msleep(5);
+
+ mt7601u_wr(dev, 0xa44, 0);
+ mt7601u_wr(dev, 0x230, 0x84210);
+ mt7601u_wr(dev, 0x400, 0x80c00);
+ mt7601u_wr(dev, 0x800, 1);
+
+ mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+ MT_PBF_CFG_TX1Q_EN |
+ MT_PBF_CFG_TX2Q_EN |
+ MT_PBF_CFG_TX3Q_EN));
+
+ mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+ mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+ MT_USB_DMA_CFG_TX_BULK_EN));
+ val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR);
+ val &= ~MT_USB_DMA_CFG_TX_CLR;
+ mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
+ /* FCE tx_fs_base_ptr */
+ mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+ /* FCE tx_fs_max_cnt */
+ mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+ /* FCE pdma enable */
+ mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+ /* FCE skip_fs_en */
+ mt7601u_wr(dev, MT_FCE_SKIP_FS, 3);
+
+ ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data);
+
+ release_firmware(fw);
+
+ return ret;
+
+err_inv_fw:
+ dev_err(dev->dev, "Invalid firmware image\n");
+ release_firmware(fw);
+ return -ENOENT;
+}
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ mutex_init(&dev->mcu.mutex);
+
+ ret = mt7601u_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state);
+
+ return 0;
+}
+
+int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1);
+ if (ret)
+ return ret;
+
+ init_completion(&dev->mcu.resp_cmpl);
+ if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
+ mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+ return -ENOMEM;
+ }
+
+ ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+ &dev->mcu.resp, GFP_KERNEL,
+ mt7601u_complete_urb, &dev->mcu.resp_cmpl);
+ if (ret) {
+ mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+ return ret;
+ }
+
+ return 0;
+}
+
+void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev)
+{
+ usb_kill_urb(dev->mcu.resp.urb);
+ mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.h b/drivers/net/wireless/mediatek/mt7601u/mcu.h
new file mode 100644
index 000000000000..4a66d1092a18
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_MCU_H
+#define __MT7601U_MCU_H
+
+struct mt7601u_dev;
+
+/* Register definitions */
+#define MT_MCU_RESET_CTL 0x070C
+#define MT_MCU_INT_LEVEL 0x0718
+#define MT_MCU_COM_REG0 0x0730
+#define MT_MCU_COM_REG1 0x0734
+#define MT_MCU_COM_REG2 0x0738
+#define MT_MCU_COM_REG3 0x073C
+
+#define MT_MCU_IVB_SIZE 0x40
+#define MT_MCU_DLM_OFFSET 0x80000
+
+#define MT_MCU_MEMMAP_WLAN 0x00410000
+#define MT_MCU_MEMMAP_BBP 0x40000000
+#define MT_MCU_MEMMAP_RF 0x80000000
+
+#define INBAND_PACKET_MAX_LEN 192
+
+enum mcu_cmd {
+ CMD_FUN_SET_OP = 1,
+ CMD_LOAD_CR = 2,
+ CMD_INIT_GAIN_OP = 3,
+ CMD_DYNC_VGA_OP = 6,
+ CMD_TDLS_CH_SW = 7,
+ CMD_BURST_WRITE = 8,
+ CMD_READ_MODIFY_WRITE = 9,
+ CMD_RANDOM_READ = 10,
+ CMD_BURST_READ = 11,
+ CMD_RANDOM_WRITE = 12,
+ CMD_LED_MODE_OP = 16,
+ CMD_POWER_SAVING_OP = 20,
+ CMD_WOW_CONFIG = 21,
+ CMD_WOW_QUERY = 22,
+ CMD_WOW_FEATURE = 24,
+ CMD_CARRIER_DETECT_OP = 28,
+ CMD_RADOR_DETECT_OP = 29,
+ CMD_SWITCH_CHANNEL_OP = 30,
+ CMD_CALIBRATION_OP = 31,
+ CMD_BEACON_OP = 32,
+ CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+ Q_SELECT = 1,
+ ATOMIC_TSSI_SETTING = 5,
+};
+
+enum mcu_power_mode {
+ RADIO_OFF = 0x30,
+ RADIO_ON = 0x31,
+ RADIO_OFF_AUTO_WAKEUP = 0x32,
+ RADIO_OFF_ADVANCE = 0x33,
+ RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibrate {
+ MCU_CAL_R = 1,
+ MCU_CAL_DCOC,
+ MCU_CAL_LC,
+ MCU_CAL_LOFT,
+ MCU_CAL_TXIQ,
+ MCU_CAL_BW,
+ MCU_CAL_DPD,
+ MCU_CAL_RXIQ,
+ MCU_CAL_TXDCOC,
+};
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev);
+int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev);
+void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev);
+
+int
+mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val);
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
new file mode 100644
index 000000000000..9102be6b95cb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MT7601U_H
+#define MT7601U_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+
+#include "regs.h"
+#include "util.h"
+
+#define MT_CALIBRATE_INTERVAL (4 * HZ)
+
+#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
+#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
+#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
+
+#define MT_BBP_REG_VERSION 0x00
+
+#define MT_USB_AGGR_SIZE_LIMIT 28 /* * 1024B */
+#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */
+#define MT_RX_ORDER 3
+#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER)
+
+struct mt7601u_dma_buf {
+ struct urb *urb;
+ void *buf;
+ dma_addr_t dma;
+ size_t len;
+};
+
+struct mt7601u_mcu {
+ struct mutex mutex;
+
+ u8 msg_seq;
+
+ struct mt7601u_dma_buf resp;
+ struct completion resp_cmpl;
+};
+
+struct mt7601u_freq_cal {
+ struct delayed_work work;
+ u8 freq;
+ bool enabled;
+ bool adjusting;
+};
+
+struct mac_stats {
+ u64 rx_stat[6];
+ u64 tx_stat[6];
+ u64 aggr_stat[2];
+ u64 aggr_n[32];
+ u64 zero_len_del[2];
+};
+
+#define N_RX_ENTRIES 16
+struct mt7601u_rx_queue {
+ struct mt7601u_dev *dev;
+
+ struct mt7601u_dma_buf_rx {
+ struct urb *urb;
+ struct page *p;
+ } e[N_RX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int pending;
+};
+
+#define N_TX_ENTRIES 64
+
+struct mt7601u_tx_queue {
+ struct mt7601u_dev *dev;
+
+ struct mt7601u_dma_buf_tx {
+ struct urb *urb;
+ struct sk_buff *skb;
+ } e[N_TX_ENTRIES];
+
+ unsigned int start;
+ unsigned int end;
+ unsigned int entries;
+ unsigned int used;
+ unsigned int fifo_seq;
+};
+
+/* WCID allocation:
+ * 0: mcast wcid
+ * 1: bssid wcid
+ * 1...: STAs
+ * ...7e: group wcids
+ * 7f: reserved
+ */
+#define N_WCIDS 128
+#define GROUP_WCID(idx) (N_WCIDS - 2 - (idx))
+
+struct mt7601u_eeprom_params;
+
+#define MT_EE_TEMPERATURE_SLOPE 39
+#define MT_FREQ_OFFSET_INVALID -128
+
+enum mt_temp_mode {
+ MT_TEMP_MODE_NORMAL,
+ MT_TEMP_MODE_HIGH,
+ MT_TEMP_MODE_LOW,
+};
+
+enum mt_bw {
+ MT_BW_20,
+ MT_BW_40,
+};
+
+enum {
+ MT7601U_STATE_INITIALIZED,
+ MT7601U_STATE_REMOVED,
+ MT7601U_STATE_WLAN_RUNNING,
+ MT7601U_STATE_MCU_RUNNING,
+ MT7601U_STATE_SCANNING,
+ MT7601U_STATE_READING_STATS,
+ MT7601U_STATE_MORE_STATS,
+};
+
+/**
+ * struct mt7601u_dev - adapter structure
+ * @lock: protects @wcid->tx_rate.
+ * @tx_lock: protects @tx_q and changes of MT7601U_STATE_*_STATS
+ * flags in @state.
+ * @rx_lock: protects @rx_q.
+ * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
+ * @mutex: ensures exclusive access from mac80211 callbacks.
+ * @vendor_req_mutex: ensures atomicity of vendor requests.
+ * @reg_atomic_mutex: ensures atomicity of indirect register accesses
+ * (accesses to RF and BBP).
+ * @hw_atomic_mutex: ensures exclusive access to HW during critical
+ * operations (power management, channel switch).
+ */
+struct mt7601u_dev {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+
+ unsigned long state;
+
+ struct mutex mutex;
+
+ unsigned long wcid_mask[N_WCIDS / BITS_PER_LONG];
+
+ struct cfg80211_chan_def chandef;
+ struct ieee80211_supported_band *sband_2g;
+
+ struct mt7601u_mcu mcu;
+
+ struct delayed_work cal_work;
+ struct delayed_work mac_work;
+
+ struct workqueue_struct *stat_wq;
+ struct delayed_work stat_work;
+
+ struct mt76_wcid *mon_wcid;
+ struct mt76_wcid __rcu *wcid[N_WCIDS];
+
+ spinlock_t lock;
+
+ const u16 *beacon_offsets;
+
+ u8 macaddr[ETH_ALEN];
+ struct mt7601u_eeprom_params *ee;
+
+ struct mutex vendor_req_mutex;
+ struct mutex reg_atomic_mutex;
+ struct mutex hw_atomic_mutex;
+
+ u32 rxfilter;
+ u32 debugfs_reg;
+
+ u8 out_eps[8];
+ u8 in_eps[8];
+ u16 out_max_packet;
+ u16 in_max_packet;
+
+ /* TX */
+ spinlock_t tx_lock;
+ struct mt7601u_tx_queue *tx_q;
+
+ atomic_t avg_ampdu_len;
+
+ /* RX */
+ spinlock_t rx_lock;
+ struct tasklet_struct rx_tasklet;
+ struct mt7601u_rx_queue rx_q;
+
+ /* Connection monitoring things */
+ spinlock_t con_mon_lock;
+ u8 ap_bssid[ETH_ALEN];
+
+ s8 bcn_freq_off;
+ u8 bcn_phy_mode;
+
+ int avg_rssi; /* starts at 0 and converges */
+
+ u8 agc_save;
+
+ struct mt7601u_freq_cal freq_cal;
+
+ bool tssi_read_trig;
+
+ s8 tssi_init;
+ s8 tssi_init_hvga;
+ s16 tssi_init_hvga_offset_db;
+
+ int prev_pwr_diff;
+
+ enum mt_temp_mode temp_mode;
+ int curr_temp;
+ int dpd_temp;
+ s8 raw_temp;
+ bool pll_lock_protect;
+
+ u8 bw;
+ bool chan_ext_below;
+
+ /* PA mode */
+ u32 rf_pa_mode[2];
+
+ struct mac_stats stats;
+};
+
+struct mt7601u_tssi_params {
+ char tssi0;
+ int trgt_power;
+};
+
+struct mt76_wcid {
+ u8 idx;
+ u8 hw_key_idx;
+
+ u16 tx_rate;
+ bool tx_rate_set;
+ u8 tx_rate_nss;
+};
+
+struct mt76_vif {
+ u8 idx;
+
+ struct mt76_wcid group_wcid;
+};
+
+struct mt76_sta {
+ struct mt76_wcid wcid;
+ u16 agg_ssn[IEEE80211_NUM_TIDS];
+};
+
+struct mt76_reg_pair {
+ u32 reg;
+ u32 value;
+};
+
+struct mt7601u_rxwi;
+
+extern const struct ieee80211_ops mt7601u_ops;
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev);
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset);
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val);
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+ const void *data, int len);
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev);
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout);
+
+/* Compatibility with mt76 */
+#define mt76_rmw_field(_dev, _reg, _field, _val) \
+ mt76_rmw(_dev, _reg, _field, MT76_SET(_field, _val))
+
+static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset)
+{
+ return mt7601u_rr(dev, offset);
+}
+
+static inline void mt76_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ return mt7601u_wr(dev, offset, val);
+}
+
+static inline u32
+mt76_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ return mt7601u_rmw(dev, offset, mask, val);
+}
+
+static inline u32 mt76_set(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ return mt76_rmw(dev, offset, 0, val);
+}
+
+static inline u32 mt76_clear(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ return mt76_rmw(dev, offset, val, 0);
+}
+
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len);
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+ const u32 *data, int n);
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr);
+
+/* Init */
+struct mt7601u_dev *mt7601u_alloc_device(struct device *dev);
+int mt7601u_init_hardware(struct mt7601u_dev *dev);
+int mt7601u_register_device(struct mt7601u_dev *dev);
+void mt7601u_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_mac_start(struct mt7601u_dev *dev);
+void mt7601u_mac_stop(struct mt7601u_dev *dev);
+
+/* PHY */
+int mt7601u_phy_init(struct mt7601u_dev *dev);
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev);
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path);
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 path);
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw);
+void mt7601u_agc_save(struct mt7601u_dev *dev);
+void mt7601u_agc_restore(struct mt7601u_dev *dev);
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+ struct cfg80211_chan_def *chandef);
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev);
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+ struct mt7601u_rxwi *rxwi, u16 rate);
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+ struct ieee80211_bss_conf *info);
+
+/* MAC */
+void mt7601u_mac_work(struct work_struct *work);
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+ int ht_mode);
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb);
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval);
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev);
+
+/* TX */
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb);
+void mt7601u_tx_stat(struct work_struct *work);
+
+/* util */
+void mt76_remove_hdr_pad(struct sk_buff *skb);
+int mt76_insert_hdr_pad(struct sk_buff *skb);
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below);
+
+static inline u32 mt7601u_mac_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+ return mt7601u_rmc(dev, MT_TX_BAND_CFG, 1, below);
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev);
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct mt76_wcid *wcid, int hw_q);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
new file mode 100644
index 000000000000..1908af6add87
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -0,0 +1,1251 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "initvals_phy.h"
+
+#include <linux/etherdevice.h>
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev);
+
+static int
+mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value)
+{
+ int ret = 0;
+
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+ WARN_ON(offset > 63))
+ return -EINVAL;
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return 0;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_DATA, value) |
+ MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
+ MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
+ MT_RF_CSR_CFG_WR |
+ MT_RF_CSR_CFG_KICK);
+ trace_rf_write(dev, bank, offset, value);
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->dev, "Error: RF write %02hhx:%02hhx failed:%d!!\n",
+ bank, offset, ret);
+
+ return ret;
+}
+
+static int
+mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset)
+{
+ int ret = -ETIMEDOUT;
+ u32 val;
+
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+ WARN_ON(offset > 63))
+ return -EINVAL;
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return 0xff;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
+ MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
+ MT_RF_CSR_CFG_KICK);
+
+ if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+ goto out;
+
+ val = mt7601u_rr(dev, MT_RF_CSR_CFG);
+ if (MT76_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
+ MT76_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+ ret = MT76_GET(MT_RF_CSR_CFG_DATA, val);
+ trace_rf_read(dev, bank, offset, ret);
+ }
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->dev, "Error: RF read %02hhx:%02hhx failed:%d!!\n",
+ bank, offset, ret);
+
+ return ret;
+}
+
+static int
+mt7601u_rf_rmw(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = mt7601u_rf_rr(dev, bank, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ ret = mt7601u_rf_wr(dev, bank, offset, val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int
+mt7601u_rf_set(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 val)
+{
+ return mt7601u_rf_rmw(dev, bank, offset, 0, val);
+}
+
+static int
+mt7601u_rf_clear(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask)
+{
+ return mt7601u_rf_rmw(dev, bank, offset, mask, 0);
+}
+
+static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val)
+{
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+ test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) {
+ dev_err(dev->dev, "Error: BBP write %02hhx failed!!\n", offset);
+ goto out;
+ }
+
+ mt7601u_wr(dev, MT_BBP_CSR_CFG,
+ MT76_SET(MT_BBP_CSR_CFG_VAL, val) |
+ MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+ MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY);
+ trace_bbp_write(dev, offset, val);
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+}
+
+static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
+{
+ u32 val;
+ int ret = -ETIMEDOUT;
+
+ if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)))
+ return -EINVAL;
+ if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+ return 0xff;
+
+ mutex_lock(&dev->reg_atomic_mutex);
+
+ if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+ goto out;
+
+ mt7601u_wr(dev, MT_BBP_CSR_CFG,
+ MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+ MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY |
+ MT_BBP_CSR_CFG_READ);
+
+ if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+ goto out;
+
+ val = mt7601u_rr(dev, MT_BBP_CSR_CFG);
+ if (MT76_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
+ ret = MT76_GET(MT_BBP_CSR_CFG_VAL, val);
+ trace_bbp_read(dev, offset, ret);
+ }
+out:
+ mutex_unlock(&dev->reg_atomic_mutex);
+
+ if (ret < 0)
+ dev_err(dev->dev, "Error: BBP read %02hhx failed:%d!!\n",
+ offset, ret);
+
+ return ret;
+}
+
+static int mt7601u_bbp_rmw(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = mt7601u_bbp_rr(dev, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ mt7601u_bbp_wr(dev, offset, val);
+
+ return val;
+}
+
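+/* Like mt7601u_bbp_rmw() but skips the write when the value would not
+ * change.
+ */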
+static u8 mt7601u_bbp_rmc(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+ int ret;
+
+ ret = mt7601u_bbp_rr(dev, offset);
+ if (ret < 0)
+ return ret;
+ val |= ret & ~mask;
+ if (ret != val)
+ mt7601u_bbp_wr(dev, offset, val);
+
+ return val;
+}
+
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
+{
+ int i = 20;
+ u8 val;
+
+ do {
+ val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
+ /* Due to integer promotion ~val is non-zero for any u8, so
+ * compare against 0xff explicitly to catch the all-ones
+ * (device gone) pattern.
+ */
+ if (val && val != 0xff)
+ break;
+ } while (--i);
+
+ if (!i) {
+ dev_err(dev->dev, "Error: BBP is not ready\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+ return mt7601u_bbp_rmc(dev, 3, 0x20, below ? 0x20 : 0);
+}
+
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+ struct mt7601u_rxwi *rxwi, u16 rate)
+{
+ static const s8 lna[2][2][3] = {
+ /* main LNA */ {
+ /* bw20 */ { -2, 15, 33 },
+ /* bw40 */ { 0, 16, 34 }
+ },
+ /* aux LNA */ {
+ /* bw20 */ { -2, 15, 33 },
+ /* bw40 */ { -2, 16, 34 }
+ }
+ };
+ int bw = MT76_GET(MT_RXWI_RATE_BW, rate);
+ int aux_lna = MT76_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
+ int lna_id = MT76_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
+ int val;
+
+ if (lna_id) /* LNA id can be 0, 2, 3. */
+ lna_id--;
+
+ val = 8;
+ val -= lna[aux_lna][bw][lna_id];
+ val -= MT76_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
+ val -= dev->ee->lna_gain;
+ val -= dev->ee->rssi_offset[0];
+
+ return val;
+}
+
+static void mt7601u_vco_cal(struct mt7601u_dev *dev)
+{
+ mt7601u_rf_wr(dev, 0, 4, 0x0a);
+ mt7601u_rf_wr(dev, 0, 5, 0x20);
+ mt7601u_rf_set(dev, 0, 4, BIT(7));
+ msleep(2);
+}
+
+static int mt7601u_set_bw_filter(struct mt7601u_dev *dev, bool cal)
+{
+ u32 filter = 0;
+ int ret;
+
+ if (!cal)
+ filter |= 0x10000;
+ if (dev->bw != MT_BW_20)
+ filter |= 0x00100;
+
+ /* TX */
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter | 1);
+ if (ret)
+ return ret;
+ /* RX */
+ return mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter);
+}
+
+static int mt7601u_load_bbp_temp_table_bw(struct mt7601u_dev *dev)
+{
+ const struct reg_table *t;
+
+ if (WARN_ON(dev->temp_mode > MT_TEMP_MODE_LOW))
+ return -EINVAL;
+
+ t = &bbp_mode_table[dev->temp_mode][dev->bw];
+
+ return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n);
+}
+
+static int mt7601u_bbp_temp(struct mt7601u_dev *dev, int mode, const char *name)
+{
+ const struct reg_table *t;
+ int ret;
+
+ if (dev->temp_mode == mode)
+ return 0;
+
+ dev->temp_mode = mode;
+ trace_temp_mode(dev, mode);
+
+ t = bbp_mode_table[dev->temp_mode];
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ t[2].regs, t[2].n);
+ if (ret)
+ return ret;
+
+ return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ t[dev->bw].regs, t[dev->bw].n);
+}
+
+static void mt7601u_apply_ch14_fixup(struct mt7601u_dev *dev, int hw_chan)
+{
+ struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+ if (hw_chan != 14 || dev->bw != MT_BW_20) {
+ mt7601u_bbp_rmw(dev, 4, 0x20, 0);
+ mt7601u_bbp_wr(dev, 178, 0xff);
+
+ t->cck[0].bw20 = dev->ee->real_cck_bw20[0];
+ t->cck[1].bw20 = dev->ee->real_cck_bw20[1];
+ } else { /* Apply CH14 OBW fixup */
+ mt7601u_bbp_wr(dev, 4, 0x60);
+ mt7601u_bbp_wr(dev, 178, 0);
+
+ /* Note: vendor code is buggy here for negative values */
+ t->cck[0].bw20 = dev->ee->real_cck_bw20[0] - 2;
+ t->cck[1].bw20 = dev->ee->real_cck_bw20[1] - 2;
+ }
+}
+
+static int __mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+#define FREQ_PLAN_REGS 4
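+ /* Per-channel values for RF registers 17-20 (one row per
+ * 2.4 GHz channel).
+ */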
+ static const u8 freq_plan[14][FREQ_PLAN_REGS] = {
+ { 0x99, 0x99, 0x09, 0x50 },
+ { 0x46, 0x44, 0x0a, 0x50 },
+ { 0xec, 0xee, 0x0a, 0x50 },
+ { 0x99, 0x99, 0x0b, 0x50 },
+ { 0x46, 0x44, 0x08, 0x51 },
+ { 0xec, 0xee, 0x08, 0x51 },
+ { 0x99, 0x99, 0x09, 0x51 },
+ { 0x46, 0x44, 0x0a, 0x51 },
+ { 0xec, 0xee, 0x0a, 0x51 },
+ { 0x99, 0x99, 0x0b, 0x51 },
+ { 0x46, 0x44, 0x08, 0x52 },
+ { 0xec, 0xee, 0x08, 0x52 },
+ { 0x99, 0x99, 0x09, 0x52 },
+ { 0x33, 0x33, 0x0b, 0x52 },
+ };
+ struct mt76_reg_pair channel_freq_plan[FREQ_PLAN_REGS] = {
+ { 17, 0 }, { 18, 0 }, { 19, 0 }, { 20, 0 },
+ };
+ struct mt76_reg_pair bbp_settings[3] = {
+ { 62, 0x37 - dev->ee->lna_gain },
+ { 63, 0x37 - dev->ee->lna_gain },
+ { 64, 0x37 - dev->ee->lna_gain },
+ };
+
+ struct ieee80211_channel *chan = chandef->chan;
+ enum nl80211_channel_type chan_type =
+ cfg80211_get_chandef_type(chandef);
+ struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+ int chan_idx;
+ bool chan_ext_below;
+ u8 bw;
+ int i, ret;
+
+ bw = MT_BW_20;
+ chan_ext_below = (chan_type == NL80211_CHAN_HT40MINUS);
+ chan_idx = chan->hw_value - 1;
+
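+ /* For HT40 the RF must be tuned to the channel centre, which sits
+ * two channels above or below the control channel.
+ */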
+ if (chandef->width == NL80211_CHAN_WIDTH_40) {
+ bw = MT_BW_40;
+
+ if (chan_idx > 1 && chan_type == NL80211_CHAN_HT40MINUS)
+ chan_idx -= 2;
+ else if (chan_idx < 12 && chan_type == NL80211_CHAN_HT40PLUS)
+ chan_idx += 2;
+ else
+ dev_err(dev->dev, "Error: invalid 40MHz channel!!\n");
+ }
+
+ if (bw != dev->bw || chan_ext_below != dev->chan_ext_below) {
+ dev_dbg(dev->dev, "Info: switching HT mode bw:%d below:%d\n",
+ bw, chan_ext_below);
+
+ mt7601u_bbp_set_bw(dev, bw);
+
+ mt7601u_bbp_set_ctrlch(dev, chan_ext_below);
+ mt7601u_mac_set_ctrlch(dev, chan_ext_below);
+ dev->chan_ext_below = chan_ext_below;
+ }
+
+ for (i = 0; i < FREQ_PLAN_REGS; i++)
+ channel_freq_plan[i].value = freq_plan[chan_idx][i];
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_RF,
+ channel_freq_plan, FREQ_PLAN_REGS);
+ if (ret)
+ return ret;
+
+ mt7601u_rmw(dev, MT_TX_ALC_CFG_0, 0x3f3f,
+ dev->ee->chan_pwr[chan_idx] & 0x3f);
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ bbp_settings, ARRAY_SIZE(bbp_settings));
+ if (ret)
+ return ret;
+
+ mt7601u_vco_cal(dev);
+ mt7601u_bbp_set_bw(dev, bw);
+ ret = mt7601u_set_bw_filter(dev, false);
+ if (ret)
+ return ret;
+
+ mt7601u_apply_ch14_fixup(dev, chan->hw_value);
+ mt7601u_wr(dev, MT_TX_PWR_CFG_0, int_to_s6(t->ofdm[1].bw20) << 24 |
+ int_to_s6(t->ofdm[0].bw20) << 16 |
+ int_to_s6(t->cck[1].bw20) << 8 |
+ int_to_s6(t->cck[0].bw20));
+
+ if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+ mt7601u_agc_reset(dev);
+
+ dev->chandef = *chandef;
+
+ return 0;
+}
+
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+ struct cfg80211_chan_def *chandef)
+{
+ int ret;
+
+ cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->freq_cal.work);
+
+ mutex_lock(&dev->hw_atomic_mutex);
+ ret = __mt7601u_phy_set_channel(dev, chandef);
+ mutex_unlock(&dev->hw_atomic_mutex);
+ if (ret)
+ return ret;
+
+ if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+ return 0;
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+ if (dev->freq_cal.enabled)
+ ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+ MT_FREQ_CAL_INIT_DELAY);
+ return 0;
+}
+
+#define BBP_R47_FLAG GENMASK(2, 0)
+#define BBP_R47_F_TSSI 0
+#define BBP_R47_F_PKT_T 1
+#define BBP_R47_F_TX_RATE 2
+#define BBP_R47_F_TEMP 4
+/**
+ * mt7601u_bbp_r47_get - read value through BBP R47/R49 pair
+ * @dev: pointer to adapter structure
+ * @reg: value of BBP R47 before the operation
+ * @flag: one of the BBP_R47_F_* flags
+ *
+ * Convenience helper for reading values through BBP R47/R49 pair.
+ * Takes old value of BBP R47 as @reg, because callers usually have it
+ * cached already.
+ *
+ * Return: value of BBP R49.
+ */
+static u8 mt7601u_bbp_r47_get(struct mt7601u_dev *dev, u8 reg, u8 flag)
+{
+ flag |= reg & ~BBP_R47_FLAG;
+ mt7601u_bbp_wr(dev, 47, flag);
+ usleep_range(500, 700);
+ return mt7601u_bbp_rr(dev, 49);
+}
+
+static s8 mt7601u_read_bootup_temp(struct mt7601u_dev *dev)
+{
+ u8 bbp_val, temp;
+ u32 rf_bp, rf_set;
+ int i;
+
+ rf_set = mt7601u_rr(dev, MT_RF_SETTING_0);
+ rf_bp = mt7601u_rr(dev, MT_RF_BYPASS_0);
+
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+ mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000010);
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0x00000010);
+
+ bbp_val = mt7601u_bbp_rmw(dev, 47, 0, 0x10);
+
+ mt7601u_bbp_wr(dev, 22, 0x40);
+
+ for (i = 100; i && (bbp_val & 0x10); i--)
+ bbp_val = mt7601u_bbp_rr(dev, 47);
+
+ temp = mt7601u_bbp_r47_get(dev, bbp_val, BBP_R47_F_TEMP);
+
+ mt7601u_bbp_wr(dev, 22, 0);
+
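+ /* Toggling BBP R21 bit 1 presumably soft-resets the block. */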
+ bbp_val = mt7601u_bbp_rr(dev, 21);
+ bbp_val |= 0x02;
+ mt7601u_bbp_wr(dev, 21, bbp_val);
+ bbp_val &= ~0x02;
+ mt7601u_bbp_wr(dev, 21, bbp_val);
+
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+ mt7601u_wr(dev, MT_RF_SETTING_0, rf_set);
+ mt7601u_wr(dev, MT_RF_BYPASS_0, rf_bp);
+
+ trace_read_temp(dev, temp);
+ return temp;
+}
+
+static s8 mt7601u_read_temp(struct mt7601u_dev *dev)
+{
+ int i;
+ u8 val;
+ s8 temp;
+
+ val = mt7601u_bbp_rmw(dev, 47, 0x7f, 0x10);
+
+ /* Note: this rarely succeeds, temp can change even if it fails. */
+ for (i = 100; i && (val & 0x10); i--)
+ val = mt7601u_bbp_rr(dev, 47);
+
+ temp = mt7601u_bbp_r47_get(dev, val, BBP_R47_F_TEMP);
+
+ trace_read_temp(dev, temp);
+ return temp;
+}
+
+static void mt7601u_rxdc_cal(struct mt7601u_dev *dev)
+{
+ static const struct mt76_reg_pair intro[] = {
+ { 158, 0x8d }, { 159, 0xfc },
+ { 158, 0x8c }, { 159, 0x4c },
+ }, outro[] = {
+ { 158, 0x8d }, { 159, 0xe0 },
+ };
+ u32 mac_ctrl;
+ int i, ret;
+
+ mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX);
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ intro, ARRAY_SIZE(intro));
+ if (ret)
+ dev_err(dev->dev, "%s intro failed:%d\n", __func__, ret);
+
+ for (i = 20; i; i--) {
+ usleep_range(300, 500);
+
+ mt7601u_bbp_wr(dev, 158, 0x8c);
+ if (mt7601u_bbp_rr(dev, 159) == 0x0c)
+ break;
+ }
+ if (!i)
+ dev_err(dev->dev, "%s timed out\n", __func__);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+ outro, ARRAY_SIZE(outro));
+ if (ret)
+ dev_err(dev->dev, "%s outro failed:%d\n", __func__, ret);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+}
+
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev)
+{
+ mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->curr_temp);
+
+ mt7601u_rxdc_cal(dev);
+}
+
+/* Note: function copied from vendor driver */
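+/* Judging by the constants this approximates 20 * log10(linear) with a
+ * piecewise-linear fit of log2; the result appears to be in 1/32 dB
+ * fixed-point units.
+ */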
+static s16 lin2dBd(u16 linear)
+{
+	short exp = 0;
+	unsigned int mantissa;
+	int app, dBd;
+
+	if (WARN_ON(!linear))
+		return -10000;
+
+	mantissa = linear;
+
+	exp = fls(mantissa) - 16;
+	if (exp > 0)
+		mantissa >>= exp;
+	else
+		mantissa <<= abs(exp);
+
+	if (mantissa <= 0xb800)
+		app = (mantissa + (mantissa >> 3) + (mantissa >> 4) - 0x9600);
+	else
+		app = (mantissa - (mantissa >> 3) - (mantissa >> 6) - 0x5a00);
+ if (app < 0)
+ app = 0;
+
+ dBd = ((15 + exp) << 15) + app;
+ dBd = (dBd << 2) + (dBd << 1) + (dBd >> 6) + (dBd >> 7);
+ dBd = (dBd >> 10);
+
+ return dBd;
+}
+
+static void
+mt7601u_set_initial_tssi(struct mt7601u_dev *dev, s16 tssi_db, s16 tssi_hvga_db)
+{
+ struct tssi_data *d = &dev->ee->tssi_data;
+ int init_offset;
+
+ init_offset = -((tssi_db * d->slope + d->offset[1]) / 4096) + 10;
+
+ mt76_rmw(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+ int_to_s6(init_offset) & MT_TX_ALC_CFG_1_TEMP_COMP);
+}
+
+static void mt7601u_tssi_dc_gain_cal(struct mt7601u_dev *dev)
+{
+ u8 rf_vga, rf_mixer, bbp_r47;
+ int i, j;
+ s8 res[4];
+ s16 tssi_init_db, tssi_init_hvga_db;
+
+ mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000030);
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0x000c0030);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+ mt7601u_bbp_wr(dev, 58, 0);
+ mt7601u_bbp_wr(dev, 241, 0x2);
+ mt7601u_bbp_wr(dev, 23, 0x8);
+ bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+ /* Set VGA gain */
+ rf_vga = mt7601u_rf_rr(dev, 5, 3);
+ mt7601u_rf_wr(dev, 5, 3, 8);
+
+ /* Mixer disable */
+ rf_mixer = mt7601u_rf_rr(dev, 4, 39);
+ mt7601u_rf_wr(dev, 4, 39, 0);
+
+ for (i = 0; i < 4; i++) {
+ mt7601u_rf_wr(dev, 4, 39, (i & 1) ? rf_mixer : 0);
+
+ mt7601u_bbp_wr(dev, 23, (i < 2) ? 0x08 : 0x02);
+ mt7601u_rf_wr(dev, 5, 3, (i < 2) ? 0x08 : 0x11);
+
+ /* BBP TSSI initial and soft reset */
+ mt7601u_bbp_wr(dev, 22, 0);
+ mt7601u_bbp_wr(dev, 244, 0);
+
+ mt7601u_bbp_wr(dev, 21, 1);
+ udelay(1);
+ mt7601u_bbp_wr(dev, 21, 0);
+
+ /* TSSI measurement */
+ mt7601u_bbp_wr(dev, 47, 0x50);
+ mt7601u_bbp_wr(dev, (i & 1) ? 244 : 22, (i & 1) ? 0x31 : 0x40);
+
+ for (j = 20; j; j--)
+ if (!(mt7601u_bbp_rr(dev, 47) & 0x10))
+ break;
+ if (!j)
+ dev_err(dev->dev, "%s timed out\n", __func__);
+
+ /* TSSI read */
+ mt7601u_bbp_wr(dev, 47, 0x40);
+ res[i] = mt7601u_bbp_rr(dev, 49);
+ }
+
+ tssi_init_db = lin2dBd((short)res[1] - res[0]);
+ tssi_init_hvga_db = lin2dBd(((short)res[3] - res[2]) * 4);
+ dev->tssi_init = res[0];
+ dev->tssi_init_hvga = res[2];
+ dev->tssi_init_hvga_offset_db = tssi_init_hvga_db - tssi_init_db;
+
+ dev_dbg(dev->dev,
+ "TSSI_init:%hhx db:%hx hvga:%hhx hvga_db:%hx off_db:%hx\n",
+ dev->tssi_init, tssi_init_db, dev->tssi_init_hvga,
+ tssi_init_hvga_db, dev->tssi_init_hvga_offset_db);
+
+ mt7601u_bbp_wr(dev, 22, 0);
+ mt7601u_bbp_wr(dev, 244, 0);
+
+ mt7601u_bbp_wr(dev, 21, 1);
+ udelay(1);
+ mt7601u_bbp_wr(dev, 21, 0);
+
+ mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+ mt7601u_wr(dev, MT_RF_SETTING_0, 0);
+
+ mt7601u_rf_wr(dev, 5, 3, rf_vga);
+ mt7601u_rf_wr(dev, 4, 39, rf_mixer);
+ mt7601u_bbp_wr(dev, 47, bbp_r47);
+
+ mt7601u_set_initial_tssi(dev, tssi_init_db, tssi_init_hvga_db);
+}
+
+static int mt7601u_temp_comp(struct mt7601u_dev *dev, bool on)
+{
+ int ret, temp, hi_temp = 400, lo_temp = -200;
+
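+	/* Temperature appears to be in tenths of a degree C relative to
+	 * the EEPROM reference (the PLL comments below imply 10 units
+	 * per degree).
+	 */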
+ temp = (dev->raw_temp - dev->ee->ref_temp) * MT_EE_TEMPERATURE_SLOPE;
+ dev->curr_temp = temp;
+
+ /* DPD Calibration */
+ if (temp - dev->dpd_temp > 450 || temp - dev->dpd_temp < -450) {
+ dev->dpd_temp = temp;
+
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+ if (ret)
+ return ret;
+
+ mt7601u_vco_cal(dev);
+
+ dev_dbg(dev->dev, "Recalibrate DPD\n");
+ }
+
+ /* PLL Lock Protect */
+ if (temp < -50 && !dev->pll_lock_protect) { /* < 20C */
+ dev->pll_lock_protect = true;
+
+ mt7601u_rf_wr(dev, 4, 4, 6);
+ mt7601u_rf_clear(dev, 4, 10, 0x30);
+
+ dev_dbg(dev->dev, "PLL lock protect on - too cold\n");
+ } else if (temp > 50 && dev->pll_lock_protect) { /* > 30C */
+ dev->pll_lock_protect = false;
+
+ mt7601u_rf_wr(dev, 4, 4, 0);
+ mt7601u_rf_rmw(dev, 4, 10, 0x30, 0x10);
+
+ dev_dbg(dev->dev, "PLL lock protect off\n");
+ }
+
+ if (on) {
+ hi_temp -= 50;
+ lo_temp -= 50;
+ }
+
+ /* BBP CR for H, L, N temperature */
+ if (temp > hi_temp)
+ return mt7601u_bbp_temp(dev, MT_TEMP_MODE_HIGH, "high");
+ else if (temp > lo_temp)
+ return mt7601u_bbp_temp(dev, MT_TEMP_MODE_NORMAL, "normal");
+ else
+ return mt7601u_bbp_temp(dev, MT_TEMP_MODE_LOW, "low");
+}
+
+/* Note: this is used only with TSSI; we can just use trgt_pwr from the EEPROM. */
+static int mt7601u_current_tx_power(struct mt7601u_dev *dev)
+{
+ return dev->ee->chan_pwr[dev->chandef.chan->hw_value - 1];
+}
+
+static bool mt7601u_use_hvga(struct mt7601u_dev *dev)
+{
+	return mt7601u_current_tx_power(dev) <= 20;
+}
+
+static s16
+mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate)
+{
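+	/* decode_tb maps the 2-bit PA mode of a rate to a power correction
+	 * in the same <<12 fixed-point dB units as the target power
+	 * (roughly +2.16 dB and -1.4 dB); inferred from the values, not
+	 * from vendor documentation.
+	 */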
+ static const s16 decode_tb[] = { 0, 8847, -5734, -5734 };
+ u32 reg;
+
+ switch (phy_mode) {
+ case MT_PHY_TYPE_OFDM:
+ tx_rate += 4;
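+		/* fall through */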
+ case MT_PHY_TYPE_CCK:
+ reg = dev->rf_pa_mode[0];
+ break;
+ default:
+ reg = dev->rf_pa_mode[1];
+ break;
+ }
+
+ return decode_tb[(reg >> (tx_rate * 2)) & 0x3];
+}
+
+static struct mt7601u_tssi_params
+mt7601u_tssi_params_get(struct mt7601u_dev *dev)
+{
+ static const u8 ofdm_pkt2rate[8] = { 6, 4, 2, 0, 7, 5, 3, 1 };
+ static const int static_power[4] = { 0, -49152, -98304, 49152 };
+ struct mt7601u_tssi_params p;
+ u8 bbp_r47, pkt_type, tx_rate;
+ struct power_per_rate *rate_table;
+
+ bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+ p.tssi0 = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TSSI);
+ dev->raw_temp = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TEMP);
+ pkt_type = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_PKT_T);
+
+ p.trgt_power = mt7601u_current_tx_power(dev);
+
+ switch (pkt_type & 0x03) {
+ case MT_PHY_TYPE_CCK:
+ tx_rate = (pkt_type >> 4) & 0x03;
+ rate_table = dev->ee->power_rate_table.cck;
+ break;
+
+ case MT_PHY_TYPE_OFDM:
+ tx_rate = ofdm_pkt2rate[(pkt_type >> 4) & 0x07];
+ rate_table = dev->ee->power_rate_table.ofdm;
+ break;
+
+ default:
+ tx_rate = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TX_RATE);
+ tx_rate &= 0x7f;
+ rate_table = dev->ee->power_rate_table.ht;
+ break;
+ }
+
+ if (dev->bw == MT_BW_20)
+ p.trgt_power += rate_table[tx_rate / 2].bw20;
+ else
+ p.trgt_power += rate_table[tx_rate / 2].bw40;
+
+ p.trgt_power <<= 12;
+
+ dev_dbg(dev->dev, "tx_rate:%02hhx pwr:%08x\n", tx_rate, p.trgt_power);
+
+ p.trgt_power += mt7601u_phy_rf_pa_mode_val(dev, pkt_type & 0x03,
+ tx_rate);
+
+ /* Channel 14, cck, bw20 */
+ if ((pkt_type & 0x03) == MT_PHY_TYPE_CCK) {
+ if (mt7601u_bbp_rr(dev, 4) & 0x20)
+ p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 18022 : 9830;
+ else
+ p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 819 : 24576;
+ }
+
+ p.trgt_power += static_power[mt7601u_bbp_rr(dev, 1) & 0x03];
+
+ p.trgt_power += dev->ee->tssi_data.tx0_delta_offset;
+
+ dev_dbg(dev->dev,
+ "tssi:%02hhx t_power:%08x temp:%02hhx pkt_type:%02hhx\n",
+ p.tssi0, p.trgt_power, dev->raw_temp, pkt_type);
+
+ return p;
+}
+
+static bool mt7601u_tssi_read_ready(struct mt7601u_dev *dev)
+{
+ return !(mt7601u_bbp_rr(dev, 47) & 0x10);
+}
+
+static int mt7601u_tssi_cal(struct mt7601u_dev *dev)
+{
+ struct mt7601u_tssi_params params;
+ int curr_pwr, diff_pwr;
+	s8 tssi_offset;
+ s8 tssi_init;
+ s16 tssi_m_dc, tssi_db;
+ bool hvga;
+ u32 val;
+
+ if (!dev->ee->tssi_enabled)
+ return 0;
+
+ hvga = mt7601u_use_hvga(dev);
+ if (!dev->tssi_read_trig)
+ return mt7601u_mcu_tssi_read_kick(dev, hvga);
+
+ if (!mt7601u_tssi_read_ready(dev))
+ return 0;
+
+ params = mt7601u_tssi_params_get(dev);
+
+ tssi_init = (hvga ? dev->tssi_init_hvga : dev->tssi_init);
+ tssi_m_dc = params.tssi0 - tssi_init;
+ tssi_db = lin2dBd(tssi_m_dc);
+ dev_dbg(dev->dev, "tssi dc:%04hx db:%04hx hvga:%d\n",
+ tssi_m_dc, tssi_db, hvga);
+
+ if (dev->chandef.chan->hw_value < 5)
+ tssi_offset = dev->ee->tssi_data.offset[0];
+ else if (dev->chandef.chan->hw_value < 9)
+ tssi_offset = dev->ee->tssi_data.offset[1];
+ else
+ tssi_offset = dev->ee->tssi_data.offset[2];
+
+ if (hvga)
+ tssi_db -= dev->tssi_init_hvga_offset_db;
+
+ curr_pwr = tssi_db * dev->ee->tssi_data.slope + (tssi_offset << 9);
+ diff_pwr = params.trgt_power - curr_pwr;
+ dev_dbg(dev->dev, "Power curr:%08x diff:%08x\n", curr_pwr, diff_pwr);
+
+ if (params.tssi0 > 126 && diff_pwr > 0) {
+ dev_err(dev->dev, "Error: TSSI upper saturation\n");
+ diff_pwr = 0;
+ }
+ if (params.tssi0 - tssi_init < 1 && diff_pwr < 0) {
+ dev_err(dev->dev, "Error: TSSI lower saturation\n");
+ diff_pwr = 0;
+ }
+
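+	/* If the power error flipped sign and is not shrinking we are
+	 * likely oscillating around the target - skip this adjustment.
+	 */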
+ if ((dev->prev_pwr_diff ^ diff_pwr) < 0 && abs(diff_pwr) < 4096 &&
+ (abs(diff_pwr) > abs(dev->prev_pwr_diff) ||
+ (diff_pwr > 0 && diff_pwr == -dev->prev_pwr_diff)))
+ diff_pwr = 0;
+ else
+ dev->prev_pwr_diff = diff_pwr;
+
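+	/* Round to nearest while dropping the 12 fractional bits. */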
+ diff_pwr += (diff_pwr > 0) ? 2048 : -2048;
+ diff_pwr /= 4096;
+
+ dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr);
+
+ val = mt7601u_rr(dev, MT_TX_ALC_CFG_1);
+ curr_pwr = s6_to_int(MT76_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
+ diff_pwr += curr_pwr;
+ val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr);
+ mt7601u_wr(dev, MT_TX_ALC_CFG_1, val);
+
+ return mt7601u_mcu_tssi_read_kick(dev, hvga);
+}
+
+static u8 mt7601u_agc_default(struct mt7601u_dev *dev)
+{
+ return (dev->ee->lna_gain - 8) * 2 + 0x34;
+}
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev)
+{
+ u8 agc = mt7601u_agc_default(dev);
+
+ mt7601u_bbp_wr(dev, 66, agc);
+}
+
+void mt7601u_agc_save(struct mt7601u_dev *dev)
+{
+ dev->agc_save = mt7601u_bbp_rr(dev, 66);
+}
+
+void mt7601u_agc_restore(struct mt7601u_dev *dev)
+{
+ mt7601u_bbp_wr(dev, 66, dev->agc_save);
+}
+
+static void mt7601u_agc_tune(struct mt7601u_dev *dev)
+{
+ u8 val = mt7601u_agc_default(dev);
+
+ if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+ return;
+
+	/* Note: only in STA mode and not dozing; perhaps do this only if
+	 * there are enough RSSI updates since the last run?
+	 * RSSI updates come only on beacons and U2M frames, so it should
+	 * work...
+	 */
+ spin_lock_bh(&dev->con_mon_lock);
+ if (dev->avg_rssi <= -70)
+ val -= 0x20;
+ else if (dev->avg_rssi <= -60)
+ val -= 0x10;
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ if (val != mt7601u_bbp_rr(dev, 66))
+ mt7601u_bbp_wr(dev, 66, val);
+
+ /* TODO: also if lost a lot of beacons try resetting
+ * (see RTMPSetAGCInitValue() call in mlme.c).
+ */
+}
+
+static void mt7601u_phy_calibrate(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ cal_work.work);
+
+ mt7601u_agc_tune(dev);
+ mt7601u_tssi_cal(dev);
+ /* If TSSI calibration was run it already updated temperature. */
+ if (!dev->ee->tssi_enabled)
+ dev->raw_temp = mt7601u_read_temp(dev);
+ mt7601u_temp_comp(dev, true); /* TODO: find right value for @on */
+
+ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+static unsigned long
+__mt7601u_phy_freq_cal(struct mt7601u_dev *dev, s8 last_offset, u8 phy_mode)
+{
+ u8 activate_threshold, deactivate_threshold;
+
+ trace_freq_cal_offset(dev, phy_mode, last_offset);
+
+ /* No beacons received - reschedule soon */
+ if (last_offset == MT_FREQ_OFFSET_INVALID)
+ return MT_FREQ_CAL_ADJ_INTERVAL;
+
+ switch (phy_mode) {
+ case MT_PHY_TYPE_CCK:
+ activate_threshold = 19;
+ deactivate_threshold = 5;
+ break;
+ case MT_PHY_TYPE_OFDM:
+ activate_threshold = 102;
+ deactivate_threshold = 32;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ activate_threshold = 82;
+ deactivate_threshold = 20;
+ break;
+ default:
+ WARN_ON(1);
+ return MT_FREQ_CAL_CHECK_INTERVAL;
+ }
+
+ if (abs(last_offset) >= activate_threshold)
+ dev->freq_cal.adjusting = true;
+ else if (abs(last_offset) <= deactivate_threshold)
+ dev->freq_cal.adjusting = false;
+
+ if (!dev->freq_cal.adjusting)
+ return MT_FREQ_CAL_CHECK_INTERVAL;
+
+ if (last_offset > deactivate_threshold) {
+ if (dev->freq_cal.freq > 0)
+ dev->freq_cal.freq--;
+ else
+ dev->freq_cal.adjusting = false;
+ } else if (last_offset < -deactivate_threshold) {
+ if (dev->freq_cal.freq < 0xbf)
+ dev->freq_cal.freq++;
+ else
+ dev->freq_cal.adjusting = false;
+ }
+
+ trace_freq_cal_adjust(dev, dev->freq_cal.freq);
+ mt7601u_rf_wr(dev, 0, 12, dev->freq_cal.freq);
+ mt7601u_vco_cal(dev);
+
+ return dev->freq_cal.adjusting ? MT_FREQ_CAL_ADJ_INTERVAL :
+ MT_FREQ_CAL_CHECK_INTERVAL;
+}
+
+static void mt7601u_phy_freq_cal(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ freq_cal.work.work);
+ s8 last_offset;
+ u8 phy_mode;
+ unsigned long delay;
+
+ spin_lock_bh(&dev->con_mon_lock);
+ last_offset = dev->bcn_freq_off;
+ phy_mode = dev->bcn_phy_mode;
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ delay = __mt7601u_phy_freq_cal(dev, last_offset, phy_mode);
+ ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, delay);
+
+ spin_lock_bh(&dev->con_mon_lock);
+ dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+ spin_unlock_bh(&dev->con_mon_lock);
+}
+
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+ struct ieee80211_bss_conf *info)
+{
+ if (!info->assoc)
+ cancel_delayed_work_sync(&dev->freq_cal.work);
+
+ /* Start/stop collecting beacon data */
+ spin_lock_bh(&dev->con_mon_lock);
+ ether_addr_copy(dev->ap_bssid, info->bssid);
+ dev->avg_rssi = 0;
+ dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+ spin_unlock_bh(&dev->con_mon_lock);
+
+ dev->freq_cal.freq = dev->ee->rf_freq_off;
+ dev->freq_cal.enabled = info->assoc;
+ dev->freq_cal.adjusting = false;
+
+ if (info->assoc)
+ ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+ MT_FREQ_CAL_INIT_DELAY);
+}
+
+static int mt7601u_init_cal(struct mt7601u_dev *dev)
+{
+ u32 mac_ctrl;
+ int ret;
+
+ dev->raw_temp = mt7601u_read_bootup_temp(dev);
+ dev->curr_temp = (dev->raw_temp - dev->ee->ref_temp) *
+ MT_EE_TEMPERATURE_SLOPE;
+ dev->dpd_temp = dev->curr_temp;
+
+ mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_R, 0);
+ if (ret)
+ return ret;
+
+ ret = mt7601u_rf_rr(dev, 0, 4);
+ if (ret < 0)
+ return ret;
+ ret |= 0x80;
+ ret = mt7601u_rf_wr(dev, 0, 4, ret);
+ if (ret)
+ return ret;
+ msleep(2);
+
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXDCOC, 0);
+ if (ret)
+ return ret;
+
+ mt7601u_rxdc_cal(dev);
+
+ ret = mt7601u_set_bw_filter(dev, true);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_LOFT, 0);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXIQ, 0);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_RXIQ, 0);
+ if (ret)
+ return ret;
+ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+ if (ret)
+ return ret;
+
+ mt7601u_rxdc_cal(dev);
+
+ mt7601u_tssi_dc_gain_cal(dev);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+
+ mt7601u_temp_comp(dev, true);
+
+ return 0;
+}
+
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw)
+{
+ u32 val, old;
+
+ if (bw == dev->bw) {
+ /* Vendor driver does the rmc even when no change is needed. */
+ mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+ return 0;
+ }
+ dev->bw = bw;
+
+ /* Stop MAC for the time of bw change */
+ old = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+ val = old & ~(MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, val);
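+	/* Wait for TX/RX to drain before switching the BBP bandwidth. */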
+ mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+ 0, 500000);
+
+ mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+ mt7601u_wr(dev, MT_MAC_SYS_CTRL, old);
+
+ return mt7601u_load_bbp_temp_table_bw(dev);
+}
+
+/**
+ * mt7601u_set_rx_path - set rx path in BBP
+ * @dev: pointer to adapter structure
+ * @path: rx path to set, values are 0-based
+ */
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path)
+{
+ mt7601u_bbp_rmw(dev, 3, 0x18, path << 3);
+}
+
+/**
+ * mt7601u_set_tx_dac - set which tx DAC to use
+ * @dev: pointer to adapter structure
+ * @dac: DAC index, values are 0-based
+ */
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 dac)
+{
+ mt7601u_bbp_rmc(dev, 1, 0x18, dac << 3);
+}
+
+int mt7601u_phy_init(struct mt7601u_dev *dev)
+{
+ int ret;
+
+ dev->rf_pa_mode[0] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG0);
+ dev->rf_pa_mode[1] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG1);
+
+ ret = mt7601u_rf_wr(dev, 0, 12, dev->ee->rf_freq_off);
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, 0, rf_central,
+ ARRAY_SIZE(rf_central));
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, 0, rf_channel,
+ ARRAY_SIZE(rf_channel));
+ if (ret)
+ return ret;
+ ret = mt7601u_write_reg_pairs(dev, 0, rf_vga, ARRAY_SIZE(rf_vga));
+ if (ret)
+ return ret;
+
+ ret = mt7601u_init_cal(dev);
+ if (ret)
+ return ret;
+
+ dev->prev_pwr_diff = 100;
+
+ INIT_DELAYED_WORK(&dev->cal_work, mt7601u_phy_calibrate);
+ INIT_DELAYED_WORK(&dev->freq_cal.work, mt7601u_phy_freq_cal);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/regs.h b/drivers/net/wireless/mediatek/mt7601u/regs.h
new file mode 100644
index 000000000000..afd8978e83fa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/regs.h
@@ -0,0 +1,636 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_REGS_H
+#define __MT76_REGS_H
+
+#include <linux/bitops.h>
+
+#ifndef GENMASK
+#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#endif
+
+#define MT_ASIC_VERSION 0x0000
+
+#define MT76XX_REV_E3 0x22
+#define MT76XX_REV_E4 0x33
+
+#define MT_CMB_CTRL 0x0020
+#define MT_CMB_CTRL_XTAL_RDY BIT(22)
+#define MT_CMB_CTRL_PLL_LD BIT(23)
+
+#define MT_EFUSE_CTRL 0x0024
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_DATA_BASE 0x0028
+#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0 0x0040
+#define MT_COEXCFG0_COEX_EN BIT(0)
+
+#define MT_WLAN_FUN_CTRL 0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0 0x0100
+#define MT_XO_CTRL1 0x0104
+#define MT_XO_CTRL2 0x0108
+#define MT_XO_CTRL3 0x010c
+#define MT_XO_CTRL4 0x0110
+
+#define MT_XO_CTRL5 0x0114
+#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8)
+
+#define MT_XO_CTRL6 0x0118
+#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8)
+
+#define MT_XO_CTRL7 0x011c
+
+#define MT_WLAN_MTC_CTRL 0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28)
+
+#define MT_INT_SOURCE_CSR 0x0200
+#define MT_INT_MASK_CSR 0x0204
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+#define MT_INT_RX_COHERENT BIT(16)
+#define MT_INT_TX_COHERENT BIT(17)
+#define MT_INT_ANY_COHERENT BIT(18)
+#define MT_INT_MCU_CMD BIT(19)
+#define MT_INT_TBTT BIT(20)
+#define MT_INT_PRE_TBTT BIT(21)
+#define MT_INT_TX_STAT BIT(22)
+#define MT_INT_AUTO_WAKEUP BIT(23)
+#define MT_INT_GPTIMER BIT(24)
+#define MT_INT_RXDELAYINT BIT(26)
+#define MT_INT_TXDELAYINT BIT(27)
+
+#define MT_WPDMA_GLO_CFG 0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MT_WPDMA_RST_IDX 0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG 0x0210
+
+#define MT_WMM_AIFSN 0x0214
+#define MT_WMM_AIFSN_MASK GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMIN 0x0218
+#define MT_WMM_CWMIN_MASK GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_CWMAX 0x021c
+#define MT_WMM_CWMAX_MASK GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE 0x0220
+#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n) (((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK GENMASK(15, 0)
+
+#define MT_FCE_DMA_ADDR 0x0230
+#define MT_FCE_DMA_LEN 0x0234
+
+#define MT_USB_DMA_CFG 0x238
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_PHY_CLR BIT(16)
+#define MT_USB_DMA_CFG_TX_CLR BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23)
+#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
+#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 27)
+#define MT_USB_DMA_CFG_RX_BUSY BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY BIT(31)
+
+#define MT_TSO_CTRL 0x0250
+#define MT_HEADER_TRANS_CTRL_REG 0x0260
+
+#define MT_US_CYC_CFG 0x02a4
+#define MT_US_CYC_CNT GENMASK(7, 0)
+
+#define MT_TX_RING_BASE 0x0300
+#define MT_RX_RING_BASE 0x03c0
+#define MT_RING_SIZE 0x10
+
+#define MT_TX_HW_QUEUE_MCU 8
+#define MT_TX_HW_QUEUE_MGMT 9
+
+#define MT_PBF_SYS_CTRL 0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4)
+
+#define MT_PBF_CFG 0x0404
+#define MT_PBF_CFG_TX0Q_EN BIT(0)
+#define MT_PBF_CFG_TX1Q_EN BIT(1)
+#define MT_PBF_CFG_TX2Q_EN BIT(2)
+#define MT_PBF_CFG_TX3Q_EN BIT(3)
+#define MT_PBF_CFG_RX0Q_EN BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT 0x0408
+#define MT_PBF_RX_MAX_PCNT 0x040c
+
+#define MT_BCN_OFFSET_BASE 0x041c
+#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RF_CSR_CFG 0x0500
+#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR BIT(30)
+#define MT_RF_CSR_CFG_KICK BIT(31)
+
+#define MT_RF_BYPASS_0 0x0504
+#define MT_RF_BYPASS_1 0x0508
+#define MT_RF_SETTING_0 0x050c
+
+#define MT_RF_DATA_WRITE 0x0524
+
+#define MT_RF_CTRL 0x0528
+#define MT_RF_CTRL_ADDR GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE BIT(12)
+#define MT_RF_CTRL_BUSY BIT(13)
+#define MT_RF_CTRL_IDX BIT(16)
+
+#define MT_RF_DATA_READ 0x052c
+
+#define MT_FCE_PSE_CTRL 0x0800
+#define MT_FCE_PARAMETERS 0x0804
+#define MT_FCE_CSO 0x0808
+
+#define MT_FCE_L2_STUFF 0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8
+
+#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4
+
+#define MT_PAUSE_ENABLE_CONTROL1 0x0a38
+
+#define MT_FCE_SKIP_FS 0x0a6c
+
+#define MT_MAC_CSR0 0x1000
+
+#define MT_MAC_SYS_CTRL 0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3)
+
+#define MT_MAC_ADDR_DW0 0x1008
+#define MT_MAC_ADDR_DW1 0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0 0x1010
+#define MT_MAC_BSSID_DW1 0x1014
+#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG 0x1018
+#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12)
+
+#define MT_BBP_CSR_CFG 0x101c
+#define MT_BBP_CSR_CFG_VAL GENMASK(7, 0)
+#define MT_BBP_CSR_CFG_REG_NUM GENMASK(15, 8)
+#define MT_BBP_CSR_CFG_READ BIT(16)
+#define MT_BBP_CSR_CFG_BUSY BIT(17)
+#define MT_BBP_CSR_CFG_PAR_DUR BIT(18)
+#define MT_BBP_CSR_CFG_RW_MODE BIT(19)
+
+#define MT_AMPDU_MAX_LEN_20M1S 0x1030
+#define MT_AMPDU_MAX_LEN_20M2S 0x1034
+#define MT_AMPDU_MAX_LEN_40M1S 0x1038
+#define MT_AMPDU_MAX_LEN_40M2S 0x103c
+#define MT_AMPDU_MAX_LEN 0x1040
+
+#define MT_WCID_DROP_BASE 0x106c
+#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK 0x108c
+
+#define MT_MAC_APC_BSSID_BASE 0x1090
+#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN BIT(16)
+
+#define MT_XIFS_TIME_CFG 0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29)
+
+#define MT_BKOFF_SLOT_CFG 0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8)
+
+#define MT_BEACON_TIME_CFG 0x1114
+#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG 0x1118
+#define MT_TBTT_TIMER_CFG 0x1124
+
+#define MT_INT_TIMER_CFG 0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN 0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1)
+
+#define MT_MAC_STATUS 0x1200
+#define MT_MAC_STATUS_TX BIT(0)
+#define MT_MAC_STATUS_RX BIT(1)
+
+#define MT_PWR_PIN_CFG 0x1204
+#define MT_AUX_CLK_CFG 0x120c
+
+#define MT_BB_PA_MODE_CFG0 0x1214
+#define MT_BB_PA_MODE_CFG1 0x1218
+#define MT_RF_PA_MODE_CFG0 0x121c
+#define MT_RF_PA_MODE_CFG1 0x1220
+
+#define MT_RF_PA_MODE_ADJ0 0x1228
+#define MT_RF_PA_MODE_ADJ1 0x122c
+
+#define MT_DACCLK_EN_DLY_CFG 0x1264
+
+#define MT_EDCA_CFG_BASE 0x1300
+#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0 0x1314
+#define MT_TX_PWR_CFG_1 0x1318
+#define MT_TX_PWR_CFG_2 0x131c
+#define MT_TX_PWR_CFG_3 0x1320
+#define MT_TX_PWR_CFG_4 0x1324
+
+#define MT_TX_BAND_CFG 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M BIT(0)
+#define MT_TX_BAND_CFG_5G BIT(1)
+#define MT_TX_BAND_CFG_2G BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY 0x1384
+#define MT_TX_MPDU_ADJ_INT 0x1388
+
+#define MT_TX_PWR_CFG_7 0x13d4
+#define MT_TX_PWR_CFG_8 0x13d8
+#define MT_TX_PWR_CFG_9 0x13dc
+
+#define MT_TX_SW_CFG0 0x1330
+#define MT_TX_SW_CFG1 0x1334
+#define MT_TX_SW_CFG2 0x1338
+
+#define MT_TXOP_CTRL_CFG 0x1340
+#define MT_TXOP_TRUN_EN GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8)
+#define MT_TXOP_CTRL
+
+#define MT_TX_RTS_CFG 0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK BIT(24)
+
+#define MT_TX_TIMEOUT_CFG 0x1348
+#define MT_TX_RETRY_CFG 0x134c
+#define MT_TX_LINK_CFG 0x1350
+#define MT_HT_FBK_CFG0 0x1354
+#define MT_HT_FBK_CFG1 0x1358
+#define MT_LG_FBK_CFG0 0x135c
+#define MT_LG_FBK_CFG1 0x1360
+
+#define MT_CCK_PROT_CFG 0x1364
+#define MT_OFDM_PROT_CFG 0x1368
+#define MT_MM20_PROT_CFG 0x136c
+#define MT_MM40_PROT_CFG 0x1370
+#define MT_GF20_PROT_CFG 0x1374
+#define MT_GF40_PROT_CFG 0x1378
+
+#define MT_PROT_RATE GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS BIT(16)
+#define MT_PROT_CTRL_CTS2SELF BIT(17)
+#define MT_PROT_NAV_SHORT BIT(18)
+#define MT_PROT_NAV_LONG BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20 BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40 BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20 BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40 BIT(25)
+#define MT_PROT_RTS_THR_EN BIT(26)
+#define MT_PROT_RATE_CCK_11 0x0003
+#define MT_PROT_RATE_OFDM_6 0x4000
+#define MT_PROT_RATE_OFDM_24 0x4004
+#define MT_PROT_RATE_DUP_OFDM_24 0x4084
+#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \
+ ~MT_PROT_TXOP_ALLOW_MM40 & \
+ ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME 0x1380
+
+#define MT_TX_PWR_CFG_0_EXT 0x1390
+#define MT_TX_PWR_CFG_1_EXT 0x1394
+
+#define MT_TX_FBK_LIMIT 0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR 0x13a0
+#define MT_TX1_RF_GAIN_CORR 0x13a4
+#define MT_TX0_RF_GAIN_ATTEN 0x13a8
+
+#define MT_TX_ALC_CFG_0 0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1 0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2 0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0)
+
+#define MT_TX0_BB_GAIN_ATTEN 0x13c0
+
+#define MT_TX_ALC_VGA3 0x13c8
+
+#define MT_TX_PROT_CFG6 0x13e0
+#define MT_TX_PROT_CFG7 0x13e4
+#define MT_TX_PROT_CFG8 0x13e8
+
+#define MT_PIFS_TX_CFG 0x13ec
+
+#define MT_RX_FILTR_CFG 0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR BIT(4)
+#define MT_RX_FILTR_CFG_MCAST BIT(5)
+#define MT_RX_FILTR_CFG_BCAST BIT(6)
+#define MT_RX_FILTR_CFG_DUP BIT(7)
+#define MT_RX_FILTR_CFG_CFACK BIT(8)
+#define MT_RX_FILTR_CFG_CFEND BIT(9)
+#define MT_RX_FILTR_CFG_ACK BIT(10)
+#define MT_RX_FILTR_CFG_CTS BIT(11)
+#define MT_RX_FILTR_CFG_RTS BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL BIT(13)
+#define MT_RX_FILTR_CFG_BA BIT(14)
+#define MT_RX_FILTR_CFG_BAR BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16)
+
+#define MT_AUTO_RSP_CFG 0x1404
+
+#define MT_AUTO_RSP_PREAMB_SHORT BIT(4)
+
+#define MT_LEGACY_BASIC_RATE 0x1408
+#define MT_HT_BASIC_RATE 0x140c
+
+#define MT_RX_PARSER_CFG 0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0)
+
+#define MT_EXT_CCA_CFG 0x141c
+#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3 0x1478
+
+#define MT_PN_PAD_MODE 0x150c
+
+#define MT_TXOP_HLDR_ET 0x1608
+
+#define MT_PROT_AUTO_TX_CFG 0x1648
+
+#define MT_RX_STA_CNT0 0x1700
+#define MT_RX_STA_CNT1 0x1704
+#define MT_RX_STA_CNT2 0x1708
+#define MT_TX_STA_CNT0 0x170c
+#define MT_TX_STA_CNT1 0x1710
+#define MT_TX_STA_CNT2 0x1714
+
+/* Vendor driver defines content of the second word of STAT_FIFO as follows:
+ * MT_TX_STAT_FIFO_RATE GENMASK(26, 16)
+ * MT_TX_STAT_FIFO_ETXBF BIT(27)
+ * MT_TX_STAT_FIFO_SND BIT(28)
+ * MT_TX_STAT_FIFO_ITXBF BIT(29)
+ * However, tests show that bits 16-31 have the same layout as the TXWI
+ * rate_ctl, with the rate set to the rate at which the frame was acked.
+ */
+#define MT_TX_STAT_FIFO 0x1718
+#define MT_TX_STAT_FIFO_VALID BIT(0)
+#define MT_TX_STAT_FIFO_PID_TYPE GENMASK(4, 1)
+#define MT_TX_STAT_FIFO_SUCCESS BIT(5)
+#define MT_TX_STAT_FIFO_AGGR BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ BIT(7)
+#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT 0x171c
+
+#define MT_TX_AGG_CNT_BASE0 0x1720
+
+#define MT_MPDU_DENSITY_CNT 0x1740
+
+#define MT_TX_AGG_CNT_BASE1 0x174c
+
+#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? \
+ MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+ MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT 0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0)
+
+#define MT_BBP_CORE_BASE 0x2000
+#define MT_BBP_IBI_BASE 0x2100
+#define MT_BBP_AGC_BASE 0x2300
+#define MT_BBP_TXC_BASE 0x2400
+#define MT_BBP_RXC_BASE 0x2500
+#define MT_BBP_TXO_BASE 0x2600
+#define MT_BBP_TXBE_BASE 0x2700
+#define MT_BBP_RXFE_BASE 0x2800
+#define MT_BBP_RXO_BASE 0x2900
+#define MT_BBP_DFS_BASE 0x2a00
+#define MT_BBP_TR_BASE 0x2b00
+#define MT_BBP_CAL_BASE 0x2c00
+#define MT_BBP_DSC_BASE 0x2e00
+#define MT_BBP_PFMU_BASE 0x2f00
+
+#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_GAIN GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE 0x1800
+#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE 0x4000
+
+#define MT_WCID_KEY_BASE 0x8000
+#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE 0xa000
+#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE 0xa800
+#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0 0xac00
+#define MT_SKEY_BASE_1 0xb400
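+/* Shared keys appear to be split into two banks of 8 BSS indices each. */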
+#define MT_SKEY_0(_bss, _idx) \
+ (MT_SKEY_BASE_0 + (4 * (_bss) + (_idx)) * 32)
+#define MT_SKEY_1(_bss, _idx) \
+ (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + (_idx)) * 32)
+#define MT_SKEY(_bss, _idx) \
+ (((_bss) & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0 0xb000
+#define MT_SKEY_MODE_BASE_1 0xb3f0
+#define MT_SKEY_MODE_0(_bss) \
+ (MT_SKEY_MODE_BASE_0 + (((_bss) / 2) << 2))
+#define MT_SKEY_MODE_1(_bss) \
+ (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss) \
+ (((_bss) & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * ((_bss) & 1)))
+
+#define MT_BEACON_BASE 0xc000
+
+#define MT_TEMP_SENSOR 0x1d000
+#define MT_TEMP_SENSOR_VAL GENMASK(6, 0)
+
+enum mt76_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_CKIP40,
+ MT_CIPHER_CKIP104,
+ MT_CIPHER_CKIP128,
+ MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.c b/drivers/net/wireless/mediatek/mt7601u/trace.c
new file mode 100644
index 000000000000..8abdd3cd546d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/trace.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.h b/drivers/net/wireless/mediatek/mt7601u/trace.h
new file mode 100644
index 000000000000..289897300ef0
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/trace.h
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(__MT7601U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT7601U_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt7601u.h"
+#include "mac.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt7601u
+
+#define MAXNAME 32
+#define DEV_ENTRY __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
+ wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT "%s "
+#define DEV_PR_ARG __entry->wiphy_name
+
+#define REG_ENTRY __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT "%04x=%08x"
+#define REG_PR_ARG __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+ TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ REG_ENTRY
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT REG_PR_FMT,
+ DEV_PR_ARG, REG_PR_ARG
+ )
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_read,
+ TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_write,
+ TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+ TP_ARGS(dev, reg, val)
+);
+
+TRACE_EVENT(mt_submit_urb,
+ TP_PROTO(struct mt7601u_dev *dev, struct urb *u),
+ TP_ARGS(dev, u),
+ TP_STRUCT__entry(
+ DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = u->pipe;
+ __entry->len = u->transfer_buffer_length;
+ ),
+ TP_printk(DEV_PR_FMT "p:%08x len:%u",
+ DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
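+/* Fill a throw-away urb on the stack so synchronous transfers can reuse
+ * the mt_submit_urb trace event.
+ */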
+#define trace_mt_submit_urb_sync(__dev, __pipe, __len) ({ \
+ struct urb u; \
+ u.pipe = __pipe; \
+ u.transfer_buffer_length = __len; \
+ trace_mt_submit_urb(__dev, &u); \
+})
+
+TRACE_EVENT(mt_mcu_msg_send,
+ TP_PROTO(struct mt7601u_dev *dev,
+ struct sk_buff *skb, u32 csum, bool resp),
+ TP_ARGS(dev, skb, csum, resp),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, info)
+ __field(u32, csum)
+ __field(bool, resp)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->info = *(u32 *)skb->data;
+ __entry->csum = csum;
+ __entry->resp = resp;
+ ),
+ TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
+ DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
+);
+
+TRACE_EVENT(mt_vend_req,
+ TP_PROTO(struct mt7601u_dev *dev, unsigned pipe, u8 req, u8 req_type,
+ u16 val, u16 offset, void *buf, size_t buflen, int ret),
+ TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
+ __field(u16, val) __field(u16, offset) __field(void*, buf)
+ __field(int, buflen) __field(int, ret)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = pipe;
+ __entry->req = req;
+ __entry->req_type = req_type;
+ __entry->val = val;
+ __entry->offset = offset;
+ __entry->buf = buf;
+ __entry->buflen = buflen;
+ __entry->ret = ret;
+ ),
+ TP_printk(DEV_PR_FMT
+ "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
+ DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
+ __entry->req_type, __entry->val, __entry->offset,
+ !!__entry->buf, __entry->buflen)
+);
+
+TRACE_EVENT(ee_read,
+ TP_PROTO(struct mt7601u_dev *dev, int offset, u16 val),
+ TP_ARGS(dev, offset, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(int, o) __field(u16, v)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->o = offset;
+ __entry->v = val;
+ ),
+ TP_printk(DEV_PR_FMT "%04x=%04x", DEV_PR_ARG, __entry->o, __entry->v)
+);
+
+DECLARE_EVENT_CLASS(dev_rf_reg_evt,
+ TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, bank)
+ __field(u8, reg)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ __entry->bank = bank;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
+ DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_read,
+ TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_write,
+ TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+ TP_ARGS(dev, bank, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_bbp_reg_evt,
+ TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+ TP_ARGS(dev, reg, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, reg)
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ REG_ASSIGN;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx=%02hhx",
+ DEV_PR_ARG, __entry->reg, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_read,
+ TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_write,
+ TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+ TP_ARGS(dev, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_simple_evt,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->val = val;
+ ),
+ TP_printk(
+ DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
+ )
+);
+
+DEFINE_EVENT(dev_simple_evt, temp_mode,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, read_temp,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, freq_cal_adjust,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(freq_cal_offset,
+ TP_PROTO(struct mt7601u_dev *dev, u8 phy_mode, s8 freq_off),
+ TP_ARGS(dev, phy_mode, freq_off),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, phy_mode)
+ __field(s8, freq_off)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->phy_mode = phy_mode;
+ __entry->freq_off = freq_off;
+ ),
+ TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+ DEV_PR_ARG, __entry->phy_mode, __entry->freq_off)
+);
+
+TRACE_EVENT(mt_rx,
+ TP_PROTO(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, u32 f),
+ TP_ARGS(dev, rxwi, f),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt7601u_rxwi, rxwi)
+ __field(u32, fce_info)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->rxwi = *rxwi;
+ __entry->fce_info = f;
+ ),
+ TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x frag_sn:%04hx rate:%04hx "
+ "uknw:%02hhx z:%02hhx%02hhx%02hhx snr:%02hhx "
+ "ant:%02hhx gain:%02hhx freq_o:%02hhx "
+ "r:%08x ea:%08x fce:%08x", DEV_PR_ARG,
+ le32_to_cpu(__entry->rxwi.rxinfo),
+ le32_to_cpu(__entry->rxwi.ctl),
+ le16_to_cpu(__entry->rxwi.frag_sn),
+ le16_to_cpu(__entry->rxwi.rate),
+ __entry->rxwi.unknown,
+ __entry->rxwi.zero[0], __entry->rxwi.zero[1],
+ __entry->rxwi.zero[2],
+ __entry->rxwi.snr, __entry->rxwi.ant,
+ __entry->rxwi.gain, __entry->rxwi.freq_off,
+ __entry->rxwi.resv2, __entry->rxwi.expert_ant,
+ __entry->fce_info)
+);
+
+TRACE_EVENT(mt_tx,
+ TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct mt76_sta *sta, struct mt76_txwi *h),
+ TP_ARGS(dev, skb, sta, h),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field_struct(struct mt76_txwi, h)
+ __field(struct sk_buff *, skb)
+ __field(struct mt76_sta *, sta)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->h = *h;
+ __entry->skb = skb;
+ __entry->sta = sta;
+ ),
+ TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate_ctl:%04hx "
+ "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
+ __entry->skb, __entry->sta,
+ le16_to_cpu(__entry->h.flags),
+ le16_to_cpu(__entry->h.rate_ctl),
+ __entry->h.ack_ctl, __entry->h.wcid,
+ le16_to_cpu(__entry->h.len_ctl))
+);
+
+TRACE_EVENT(mt_tx_dma_done,
+ TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb),
+ TP_ARGS(dev, skb),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(struct sk_buff *, skb)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->skb = skb;
+ ),
+ TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
+);
+
+TRACE_EVENT(mt_tx_status_cleaned,
+ TP_PROTO(struct mt7601u_dev *dev, int cleaned),
+ TP_ARGS(dev, cleaned),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(int, cleaned)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cleaned = cleaned;
+ ),
+ TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
+);
+
+TRACE_EVENT(mt_tx_status,
+ TP_PROTO(struct mt7601u_dev *dev, u32 stat1, u32 stat2),
+ TP_ARGS(dev, stat1, stat2),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u32, stat1) __field(u32, stat2)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->stat1 = stat1;
+ __entry->stat2 = stat2;
+ ),
+ TP_printk(DEV_PR_FMT "%08x %08x",
+ DEV_PR_ARG, __entry->stat1, __entry->stat2)
+);
+
+TRACE_EVENT(mt_rx_dma_aggr,
+ TP_PROTO(struct mt7601u_dev *dev, int cnt, bool paged),
+ TP_ARGS(dev, cnt, paged),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, cnt)
+ __field(bool, paged)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->cnt = cnt;
+ __entry->paged = paged;
+ ),
+ TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
+ DEV_PR_ARG, __entry->cnt, __entry->paged)
+);
+
+DEFINE_EVENT(dev_simple_evt, set_key,
+ TP_PROTO(struct mt7601u_dev *dev, u8 val),
+ TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(set_shared_key,
+ TP_PROTO(struct mt7601u_dev *dev, u8 vid, u8 key),
+ TP_ARGS(dev, vid, key),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u8, vid)
+ __field(u8, key)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->vid = vid;
+ __entry->key = key;
+ ),
+ TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+ DEV_PR_ARG, __entry->vid, __entry->key)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
new file mode 100644
index 000000000000..0be2080ceab3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+
+enum mt76_txq_id {
+ MT_TXQ_VO = IEEE80211_AC_VO,
+ MT_TXQ_VI = IEEE80211_AC_VI,
+ MT_TXQ_BE = IEEE80211_AC_BE,
+ MT_TXQ_BK = IEEE80211_AC_BK,
+ MT_TXQ_PSD,
+ MT_TXQ_MCU,
+ __MT_TXQ_MAX
+};
+
+/* Hardware uses mirrored order of queues with Q0 having the highest priority */
+static u8 q2hwq(u8 q)
+{
+ return q ^ 0x3;
+}
+
+/* Take mac80211 Q id from the skb and translate it to hardware Q id */
+static u8 skb2q(struct sk_buff *skb)
+{
+ int qid = skb_get_queue_mapping(skb);
+
+ if (WARN_ON(qid >= MT_TXQ_PSD)) {
+ qid = MT_TXQ_BE;
+ skb_set_queue_mapping(skb, qid);
+ }
+
+ return q2hwq(qid);
+}
+
+/* Note: TX retry reporting is a bit broken.
+ * Retries are reported only once per AMPDU and often come a frame early,
+ * i.e. they are reported in the last status preceding the AMPDU. Apart
+ * from the fact that it's hard to know the length of the AMPDU (which is
+ * needed to know how many consecutive frames the retries should be
+ * applied to), if the status comes early while the FIFO is full it gets
+ * lost and the retries of the whole AMPDU become invisible.
+ * As a work-around encode the desired rate in PKT_ID of TX descriptor
+ * and based on that guess the retries (every rate is tried once).
+ * Only downside here is that for MCS0 we have to rely solely on
+ * transmission failures as no retries can ever be reported.
+ * Not having to read EXT_FIFO has a nice effect of doubling the number
+ * of reports which can be fetched.
+ * Also the vendor driver never uses the EXT_FIFO register so it may be
+ * undertested.
+ */
+static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe)
+{
+ u8 encoded = (rate + 1) + is_probe * 8;
+
+	/* Because PKT_ID 0 disables status reporting, only 15 values are
+	 * available but 16 are needed (8 MCS * 2 for encoding is_probe)
+	 * - we need to cram two rates together. MCS0 and MCS7 with
+	 * is_probe share PKT_ID 9.
+	 */
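+	/* Resulting map: rates 0-7 take PKT_IDs 1-8; probes land on 9-15,
+	 * with probe-MCS0 and probe-MCS7 both encoded as 9 and told apart
+	 * on decode by the effective rate.
+	 */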
+ if (is_probe && rate == 7)
+ return encoded - 7;
+
+ return encoded;
+}
+
+static void
+mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+ u8 req_rate = stat->pktid;
+ u8 eff_rate = stat->rate & 0x7;
+
+ req_rate -= 1;
+
+ if (req_rate > 7) {
+ stat->is_probe = true;
+ req_rate -= 8;
+
+ /* Decide between MCS0 and MCS7 which share pktid 9 */
+ if (!req_rate && eff_rate)
+ req_rate = 7;
+ }
+
+ stat->retry = req_rate - eff_rate;
+}
+
+static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb,
+ struct ieee80211_tx_info *info)
+{
+ int pkt_len = (unsigned long)info->status.status_driver_data[0];
+
+ skb_pull(skb, sizeof(struct mt76_txwi) + 4);
+ if (ieee80211_get_hdrlen_from_skb(skb) % 4)
+ mt76_remove_hdr_pad(skb);
+
+ skb_trim(skb, pkt_len);
+}
+
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ mt7601u_tx_skb_remove_dma_overhead(skb, info);
+
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status(dev->hw, skb);
+}
+
+static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u32 need_head;
+
+ need_head = sizeof(struct mt76_txwi) + 4;
+ if (hdr_len % 4)
+ need_head += 2;
+
+ return skb_cow(skb, need_head);
+}
+
+static struct mt76_txwi *
+mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct mt76_wcid *wcid,
+ int pkt_len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct mt76_txwi *txwi;
+ unsigned long flags;
+ bool is_probe;
+ u32 pkt_id;
+ u16 rate_ctl;
+ u8 nss;
+
+ txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
+ memset(txwi, 0, sizeof(*txwi));
+
+ if (!wcid->tx_rate_set)
+ ieee80211_get_tx_rates(info->control.vif, sta, skb,
+ info->control.rates, 1);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (rate->idx < 0 || !rate->count)
+ rate_ctl = wcid->tx_rate;
+ else
+ rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ txwi->rate_ctl = cpu_to_le16(rate_ctl);
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+ u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+ ba_size <<= sta->ht_cap.ampdu_factor;
+ ba_size = min_t(int, 63, ba_size);
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ ba_size = 0;
+ txwi->ack_ctl |= MT76_SET(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+ txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
+ MT76_SET(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density));
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ txwi->flags = 0;
+ }
+
+ txwi->wcid = wcid->idx;
+
+ is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+ pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
+ pkt_len |= MT76_SET(MT_TXWI_LEN_PKTID, pkt_id);
+ txwi->len_ctl = cpu_to_le16(pkt_len);
+
+ return txwi;
+}
+
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt7601u_dev *dev = hw->priv;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
+ struct mt76_sta *msta = NULL;
+ struct mt76_wcid *wcid = dev->mon_wcid;
+ struct mt76_txwi *txwi;
+ int pkt_len = skb->len;
+ int hw_q = skb2q(skb);
+
+ BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+ info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
+
+ if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return;
+ }
+
+ if (sta) {
+ msta = (struct mt76_sta *) sta->drv_priv;
+ wcid = &msta->wcid;
+ } else if (vif) {
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+ wcid = &mvif->group_wcid;
+ }
+
+ txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len);
+
+ if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
+ return;
+
+ trace_mt_tx(dev, skb, msta, txwi);
+}
+
+void mt7601u_tx_stat(struct work_struct *work)
+{
+ struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+ stat_work.work);
+ struct mt76_tx_status stat;
+ unsigned long flags;
+ int cleaned = 0;
+
+ while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
+ stat = mt7601u_mac_fetch_tx_status(dev);
+ if (!stat.valid)
+ break;
+
+ mt7601u_tx_pktid_dec(dev, &stat);
+ mt76_send_tx_status(dev, &stat);
+
+ cleaned++;
+ }
+ trace_mt_tx_status_cleaned(dev, cleaned);
+
+ spin_lock_irqsave(&dev->tx_lock, flags);
+ if (cleaned)
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(10));
+ else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state))
+ queue_delayed_work(dev->stat_wq, &dev->stat_work,
+ msecs_to_jiffies(20));
+ else
+ clear_bit(MT7601U_STATE_READING_STATS, &dev->state);
+ spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7601u_dev *dev = hw->priv;
+ u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
+ u32 val;
+
+ /* TODO: should we do funny things with the parameters?
+ * See what mt7601u_set_default_edca() used to do in init.c.
+ */
+
+ if (params->cw_min)
+ cw_min = fls(params->cw_min);
+ if (params->cw_max)
+ cw_max = fls(params->cw_max);
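+	/* The registers store each contention window as an exponent
+	 * (CW = 2^n - 1), hence the fls() conversions above: e.g.
+	 * params->cw_min = 15 yields fls(15) = 4, i.e. CW = 2^4 - 1.
+	 */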
+
+ WARN_ON(params->txop > 0xff);
+ WARN_ON(params->aifs > 0xf);
+ WARN_ON(cw_min > 0xf);
+ WARN_ON(cw_max > 0xf);
+
+ val = MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) |
+ MT76_SET(MT_EDCA_CFG_CWMIN, cw_min) |
+ MT76_SET(MT_EDCA_CFG_CWMAX, cw_max);
+	/* TODO: based on the user-controlled EnableTxBurst variable the
+	 * vendor driver sets a really long TXOP on AC0 (see connect.c:2009),
+	 * but apparently only while connected? When not connected it should
+	 * be 0.
+	 */
+ if (!hw_q)
+ val |= 0x60;
+ else
+ val |= MT76_SET(MT_EDCA_CFG_TXOP, params->txop);
+ mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
+ val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
+ val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
+
+ val = mt76_rr(dev, MT_WMM_AIFSN);
+ val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
+ val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_AIFSN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMIN);
+ val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
+ val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMIN, val);
+
+ val = mt76_rr(dev, MT_WMM_CWMAX);
+ val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
+ val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
+ mt76_wr(dev, MT_WMM_CWMAX, val);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
new file mode 100644
index 000000000000..99e2b3997bfa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7601u.h"
+#include "usb.h"
+#include "trace.h"
+
+static struct usb_device_id mt7601u_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x17d3) },
+ { USB_DEVICE(0x0e8d, 0x760a) },
+ { USB_DEVICE(0x0e8d, 0x760b) },
+ { USB_DEVICE(0x13d3, 0x3431) },
+ { USB_DEVICE(0x13d3, 0x3434) },
+ { USB_DEVICE(0x148f, 0x7601) },
+ { USB_DEVICE(0x148f, 0x760a) },
+ { USB_DEVICE(0x148f, 0x760b) },
+ { USB_DEVICE(0x148f, 0x760c) },
+ { USB_DEVICE(0x148f, 0x760d) },
+ { USB_DEVICE(0x2001, 0x3d04) },
+ { USB_DEVICE(0x2717, 0x4106) },
+ { USB_DEVICE(0x2955, 0x0001) },
+ { USB_DEVICE(0x2955, 0x1001) },
+ { USB_DEVICE(0x2a5f, 0x1000) },
+ { USB_DEVICE(0x7392, 0x7710) },
+ { 0, }
+};
+
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+ struct mt7601u_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+ buf->len = len;
+ buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+ buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+
+ return !buf->urb || !buf->buf;
+}
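+
+/* Note the inverted convention above: true means the allocation failed.
+ * A typical caller would do (sketch):
+ *
+ *	if (mt7601u_usb_alloc_buf(dev, len, &buf))
+ *		return -ENOMEM;
+ *
+ * mt7601u_usb_free_buf() is safe on partial failure because both
+ * usb_free_urb() and usb_free_coherent() accept NULL.
+ */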
+
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+ usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
+ usb_free_urb(buf->urb);
+}
+
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+ struct mt7601u_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context)
+{
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ unsigned pipe;
+ int ret;
+
+ if (dir == USB_DIR_IN)
+ pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[ep_idx]);
+ else
+ pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep_idx]);
+
+ usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
+ complete_fn, context);
+ buf->urb->transfer_dma = buf->dma;
+ buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ trace_mt_submit_urb(dev, buf->urb);
+ ret = usb_submit_urb(buf->urb, gfp);
+ if (ret)
+ dev_err(dev->dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
+ dir, ep_idx, ret);
+ return ret;
+}
+
+void mt7601u_complete_urb(struct urb *urb)
+{
+ struct completion *cmpl = urb->context;
+
+ complete(cmpl);
+}
+
+static int
+__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen)
+{
+ int i, ret;
+ struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+ const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ const unsigned int pipe = (direction == USB_DIR_IN) ?
+ usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+
+ for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+ ret = usb_control_msg(usb_dev, pipe, req, req_type,
+ val, offset, buf, buflen,
+ MT_VEND_REQ_TOUT_MS);
+ trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
+ buf, buflen, ret);
+
+ if (ret >= 0 || ret == -ENODEV)
+ return ret;
+
+ msleep(5);
+ }
+
+ dev_err(dev->dev, "Vendor request req:%02x off:%04x failed:%d\n",
+ req, offset, ret);
+
+ return ret;
+}
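+
+/* Requests are retried up to MT_VEND_REQ_MAX_RETRY times with a 5 ms pause;
+ * -ENODEV short-circuits the loop since it means the device is gone, and
+ * the locked wrapper below also latches MT7601U_STATE_REMOVED in that case.
+ */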
+
+int
+mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen)
+{
+ int ret;
+
+ mutex_lock(&dev->vendor_req_mutex);
+
+ ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
+ buf, buflen);
+ if (ret == -ENODEV)
+ set_bit(MT7601U_STATE_REMOVED, &dev->state);
+
+ mutex_unlock(&dev->vendor_req_mutex);
+
+ return ret;
+}
+
+void mt7601u_vendor_reset(struct mt7601u_dev *dev)
+{
+ mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+ MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+}
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
+{
+ int ret;
+ __le32 reg;
+ u32 val;
+
+ WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+
+ ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
+ 0, offset, &reg, sizeof(reg));
+ val = le32_to_cpu(reg);
+ if (ret > 0 && ret != sizeof(reg)) {
+ dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
+ ret, offset);
+ val = ~0;
+ }
+
+ trace_reg_read(dev, offset, val);
+ return val;
+}
+
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+ const u16 offset, const u32 val)
+{
+ int ret;
+
+ ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+ val & 0xffff, offset, NULL, 0);
+ if (ret)
+ return ret;
+ return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+ val >> 16, offset + 2, NULL, 0);
+}
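+
+/* Vendor control transfers carry only a 16-bit wValue, hence the split
+ * above: writing 0xaabbccdd at offset 0x1000 issues val 0xccdd at 0x1000
+ * followed by val 0xaabb at 0x1002.
+ */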
+
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+ WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
+
+ mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val);
+ trace_reg_write(dev, offset, val);
+}
+
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ val |= mt7601u_rr(dev, offset) & ~mask;
+ mt7601u_wr(dev, offset, val);
+ return val;
+}
+
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ u32 reg = mt7601u_rr(dev, offset);
+
+ val |= reg & ~mask;
+ if (reg != val)
+ mt7601u_wr(dev, offset, val);
+ return val;
+}
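+
+/* Both helpers read, mask and merge; mt7601u_rmw() always writes the result
+ * back, while mt7601u_rmc() skips the write when the merged value equals
+ * what was read, saving a vendor request on the USB bus.
+ */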
+
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
+ WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+
+ mt7601u_burst_write_regs(dev, offset, data, len / 4);
+}
+
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr)
+{
+ mt7601u_wr(dev, offset, get_unaligned_le32(addr));
+ mt7601u_wr(dev, offset + 4, addr[4] | addr[5] << 8);
+}
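+
+/* The 6-byte address is laid out as a little-endian dword plus a half-word:
+ * e.g. 00:11:22:33:44:55 is written as 0x33221100 at offset and 0x5544 at
+ * offset + 4.
+ */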
+
+static int mt7601u_assign_pipes(struct usb_interface *usb_intf,
+ struct mt7601u_dev *dev)
+{
+ struct usb_endpoint_descriptor *ep_desc;
+ struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
+ unsigned i, ep_i = 0, ep_o = 0;
+
+ BUILD_BUG_ON(sizeof(dev->in_eps) < __MT_EP_IN_MAX);
+ BUILD_BUG_ON(sizeof(dev->out_eps) < __MT_EP_OUT_MAX);
+
+ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+ ep_desc = &intf_desc->endpoint[i].desc;
+
+ if (usb_endpoint_is_bulk_in(ep_desc) &&
+ ep_i++ < __MT_EP_IN_MAX) {
+ dev->in_eps[ep_i - 1] = usb_endpoint_num(ep_desc);
+ dev->in_max_packet = usb_endpoint_maxp(ep_desc);
+			/* Note: this is ignored by the USB subsystem, but
+			 * the vendor code does it. We can drop this at some
+			 * point.
+			 */
+ dev->in_eps[ep_i - 1] |= USB_DIR_IN;
+ } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+ ep_o++ < __MT_EP_OUT_MAX) {
+ dev->out_eps[ep_o - 1] = usb_endpoint_num(ep_desc);
+ dev->out_max_packet = usb_endpoint_maxp(ep_desc);
+ }
+ }
+
+ if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
+ dev_err(dev->dev, "Error: wrong pipe number in:%d out:%d\n",
+ ep_i, ep_o);
+ return -EINVAL;
+ }
+
+ return 0;
+}
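+
+/* The interface is expected to expose exactly __MT_EP_IN_MAX (2) bulk-in
+ * and __MT_EP_OUT_MAX (6) bulk-out endpoints, matching the mt_usb_ep_in/out
+ * enums in usb.h; any other layout is rejected above with -EINVAL.
+ */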
+
+static int mt7601u_probe(struct usb_interface *usb_intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+ struct mt7601u_dev *dev;
+ u32 asic_rev, mac_rev;
+ int ret;
+
+ dev = mt7601u_alloc_device(&usb_intf->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ usb_dev = usb_get_dev(usb_dev);
+ usb_reset_device(usb_dev);
+
+ usb_set_intfdata(usb_intf, dev);
+
+ ret = mt7601u_assign_pipes(usb_intf, dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_wait_asic_ready(dev);
+ if (ret)
+ goto err;
+
+ asic_rev = mt7601u_rr(dev, MT_ASIC_VERSION);
+ mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
+ dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
+ asic_rev, mac_rev);
+
+ /* Note: vendor driver skips this check for MT7601U */
+ if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+ dev_warn(dev->dev, "Warning: eFUSE not present\n");
+
+ ret = mt7601u_init_hardware(dev);
+ if (ret)
+ goto err;
+ ret = mt7601u_register_device(dev);
+ if (ret)
+ goto err_hw;
+
+ set_bit(MT7601U_STATE_INITIALIZED, &dev->state);
+
+ return 0;
+err_hw:
+ mt7601u_cleanup(dev);
+err:
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->hw);
+ return ret;
+}
+
+static void mt7601u_disconnect(struct usb_interface *usb_intf)
+{
+ struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+ ieee80211_unregister_hw(dev->hw);
+ mt7601u_cleanup(dev);
+
+ usb_set_intfdata(usb_intf, NULL);
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+ destroy_workqueue(dev->stat_wq);
+ ieee80211_free_hw(dev->hw);
+}
+
+static int mt7601u_suspend(struct usb_interface *usb_intf, pm_message_t state)
+{
+ struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+ mt7601u_cleanup(dev);
+
+ return 0;
+}
+
+static int mt7601u_resume(struct usb_interface *usb_intf)
+{
+ struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+ return mt7601u_init_hardware(dev);
+}
+
+MODULE_DEVICE_TABLE(usb, mt7601u_device_table);
+MODULE_FIRMWARE(MT7601U_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt7601u_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7601u_device_table,
+ .probe = mt7601u_probe,
+ .disconnect = mt7601u_disconnect,
+ .suspend = mt7601u_suspend,
+ .resume = mt7601u_resume,
+ .reset_resume = mt7601u_resume,
+ .soft_unbind = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7601u_driver);
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h
new file mode 100644
index 000000000000..49e188fa3798
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_USB_H
+#define __MT7601U_USB_H
+
+#include "mt7601u.h"
+
+#define MT7601U_FIRMWARE "mt7601u.bin"
+
+#define MT_VEND_REQ_MAX_RETRY 10
+#define MT_VEND_REQ_TOUT_MS 300
+
+#define MT_VEND_DEV_MODE_RESET 1
+
+enum mt_vendor_req {
+ MT_VEND_DEV_MODE = 1,
+ MT_VEND_WRITE = 2,
+ MT_VEND_MULTI_READ = 7,
+ MT_VEND_WRITE_FCE = 0x42,
+};
+
+enum mt_usb_ep_in {
+ MT_EP_IN_PKT_RX,
+ MT_EP_IN_CMD_RESP,
+ __MT_EP_IN_MAX,
+};
+
+enum mt_usb_ep_out {
+ MT_EP_OUT_INBAND_CMD,
+ MT_EP_OUT_AC_BK,
+ MT_EP_OUT_AC_BE,
+ MT_EP_OUT_AC_VI,
+ MT_EP_OUT_AC_VO,
+ MT_EP_OUT_HCCA,
+ __MT_EP_OUT_MAX,
+};
+
+static inline struct usb_device *mt7601u_to_usb_dev(struct mt7601u_dev *mt7601u)
+{
+ return interface_to_usbdev(to_usb_interface(mt7601u->dev));
+}
+
+static inline bool mt7601u_urb_has_error(struct urb *urb)
+{
+ return urb->status &&
+ urb->status != -ENOENT &&
+ urb->status != -ECONNRESET &&
+ urb->status != -ESHUTDOWN;
+}
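+
+/* -ENOENT, -ECONNRESET and -ESHUTDOWN are what killed/unlinked URBs and a
+ * disappearing host controller report, so they are not treated as errors
+ * here; anything else is a real transfer failure.
+ */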
+
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+ struct mt7601u_dma_buf *buf);
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf);
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+ struct mt7601u_dma_buf *buf, gfp_t gfp,
+ usb_complete_t complete_fn, void *context);
+void mt7601u_complete_urb(struct urb *urb);
+
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+ const u8 direction, const u16 val, const u16 offset,
+ void *buf, const size_t buflen);
+void mt7601u_vendor_reset(struct mt7601u_dev *dev);
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+ const u16 offset, const u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.c b/drivers/net/wireless/mediatek/mt7601u/util.c
new file mode 100644
index 000000000000..7c1787c1ddcd
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/util.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+
+void mt76_remove_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ memmove(skb->data + 2, skb->data, len);
+ skb_pull(skb, 2);
+}
+
+int mt76_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+ int ret;
+
+ if (len % 4 == 0)
+ return 0;
+
+ ret = skb_cow(skb, 2);
+ if (ret)
+ return ret;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+ return 0;
+}
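+
+/* The pad keeps the payload dword-aligned: e.g. a 26-byte QoS data header
+ * is not a multiple of four, so two zero bytes are inserted right after it;
+ * mt76_remove_hdr_pad() above is the inverse operation for received frames.
+ */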
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.h b/drivers/net/wireless/mediatek/mt7601u/util.h
new file mode 100644
index 000000000000..b89140bf1210
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt7601u/util.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+
+/*
+ * Power of two check: this verifies that the given mask
+ * contains a contiguous set of bits.
+ * Note that we cannot use the is_power_of_2() function since this
+ * check must be done at compile-time.
+ */
+#define is_power_of_two(x) ( !((x) & ((x)-1)) )
+#define low_bit_mask(x) ( ((x)-1) & ~(x) )
+#define is_valid_mask(x) is_power_of_two(1LU + (x) + low_bit_mask(x))
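+
+/*
+ * Worked example: for x = 0xf0, low_bit_mask(0xf0) = 0x0f and
+ * 1 + 0xf0 + 0x0f = 0x100, a power of two, so 0xf0 is a valid mask;
+ * for the non-contiguous x = 0xf1 the sum is 0xf2, which is not.
+ */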
+
+/*
+ * Macros to find first set bit in a variable.
+ * These macros behave the same as the __ffs() functions, with
+ * the important difference that the result is computed at
+ * compile time rather than at run time.
+ */
+#define compile_ffs2(__x) \
+ __builtin_choose_expr(((__x) & 0x1), 0, 1)
+
+#define compile_ffs4(__x) \
+ __builtin_choose_expr(((__x) & 0x3), \
+ (compile_ffs2((__x))), \
+ (compile_ffs2((__x) >> 2) + 2))
+
+#define compile_ffs8(__x) \
+ __builtin_choose_expr(((__x) & 0xf), \
+ (compile_ffs4((__x))), \
+ (compile_ffs4((__x) >> 4) + 4))
+
+#define compile_ffs16(__x) \
+ __builtin_choose_expr(((__x) & 0xff), \
+ (compile_ffs8((__x))), \
+ (compile_ffs8((__x) >> 8) + 8))
+
+#define compile_ffs32(__x) \
+ __builtin_choose_expr(((__x) & 0xffff), \
+ (compile_ffs16((__x))), \
+ (compile_ffs16((__x) >> 16) + 16))
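+
+/*
+ * E.g. compile_ffs32(0x0000ff00) expands to 8 and compile_ffs32(0x1) to 0,
+ * mirroring __ffs() while being evaluated entirely by the compiler via
+ * __builtin_choose_expr().
+ */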
+
+/*
+ * This macro checks the requirements for the MT76_{SET,GET} macros.
+ * The mask should be a constant non-zero contiguous set of bits which
+ * does not exceed the given type limit.
+ */
+#define FIELD_CHECK(__mask) \
+ BUILD_BUG_ON(!(__mask) || !is_valid_mask(__mask))
+
+#define MT76_SET(_mask, _val) \
+ ({ \
+ FIELD_CHECK(_mask); \
+ (((u32) (_val)) << compile_ffs32(_mask)) & _mask; \
+ })
+
+#define MT76_GET(_mask, _val) \
+ ({ \
+ FIELD_CHECK(_mask); \
+ (u32) (((_val) & _mask) >> compile_ffs32(_mask)); \
+ })
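+
+/*
+ * Usage example with an arbitrary mask: for _mask = 0x0000ff00,
+ * MT76_SET(0x0000ff00, 0x12) == 0x1200 and
+ * MT76_GET(0x0000ff00, 0x345678) == 0x56.
+ */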
+
+#endif
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c
index 3ab87a855122..65cd461c88db 100644
--- a/drivers/net/wireless/mwifiex/11h.c
+++ b/drivers/net/wireless/mwifiex/11h.c
@@ -134,8 +134,8 @@ void mwifiex_dfs_cac_work_queue(struct work_struct *work)
chandef = priv->dfs_chandef;
if (priv->wdev.cac_started) {
- dev_dbg(priv->adapter->dev,
- "CAC timer finished; No radar detected\n");
+ mwifiex_dbg(priv->adapter, MSG,
+ "CAC timer finished; No radar detected\n");
cfg80211_cac_event(priv->netdev, &chandef,
NL80211_RADAR_CAC_FINISHED,
GFP_KERNEL);
@@ -161,9 +161,9 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
cr_req->chan_desc.chan_width = radar_params->chandef->width;
cr_req->msec_dwell_time = cpu_to_le32(radar_params->cac_time_ms);
- dev_dbg(priv->adapter->dev,
- "11h: issuing DFS Radar check for channel=%d\n",
- radar_params->chandef->chan->hw_value);
+ mwifiex_dbg(priv->adapter, MSG,
+ "11h: issuing DFS Radar check for channel=%d\n",
+ radar_params->chandef->chan->hw_value);
return 0;
}
@@ -174,8 +174,8 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
void mwifiex_abort_cac(struct mwifiex_private *priv)
{
if (priv->wdev.cac_started) {
- dev_dbg(priv->adapter->dev,
- "Aborting delayed work for CAC.\n");
+ mwifiex_dbg(priv->adapter, MSG,
+ "Aborting delayed work for CAC.\n");
cancel_delayed_work_sync(&priv->dfs_cac_work);
cfg80211_cac_event(priv->netdev, &priv->dfs_chandef,
NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
@@ -199,7 +199,8 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
sizeof(u32));
if (le32_to_cpu(rpt_event->result) != HostCmd_RESULT_OK) {
- dev_err(priv->adapter->dev, "Error in channel report event\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Error in channel report event\n");
return -1;
}
@@ -212,8 +213,8 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
switch (le16_to_cpu(rpt->header.type)) {
case TLV_TYPE_CHANRPT_11H_BASIC:
if (rpt->map.radar) {
- dev_notice(priv->adapter->dev,
- "RADAR Detected on channel %d!\n",
+ mwifiex_dbg(priv->adapter, MSG,
+ "RADAR Detected on channel %d!\n",
priv->dfs_chandef.chan->hw_value);
cancel_delayed_work_sync(&priv->dfs_cac_work);
cfg80211_cac_event(priv->netdev,
@@ -242,16 +243,17 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
rdr_event = (void *)(skb->data + sizeof(u32));
if (le32_to_cpu(rdr_event->passed)) {
- dev_notice(priv->adapter->dev,
- "radar detected; indicating kernel\n");
+ mwifiex_dbg(priv->adapter, MSG,
+ "radar detected; indicating kernel\n");
cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
GFP_KERNEL);
- dev_dbg(priv->adapter->dev, "regdomain: %d\n",
- rdr_event->reg_domain);
- dev_dbg(priv->adapter->dev, "radar detection type: %d\n",
- rdr_event->det_type);
+ mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n",
+ rdr_event->reg_domain);
+ mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
+ rdr_event->det_type);
} else {
- dev_dbg(priv->adapter->dev, "false radar detection event!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "false radar detection event!\n");
}
return 0;
@@ -276,20 +278,20 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
bss_cfg = &priv->bss_cfg;
if (!bss_cfg->beacon_period) {
- dev_err(priv->adapter->dev,
- "channel switch: AP already stopped\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "channel switch: AP already stopped\n");
return;
}
mwifiex_uap_set_channel(bss_cfg, priv->dfs_chandef);
if (mwifiex_config_start_uap(priv, bss_cfg)) {
- dev_dbg(priv->adapter->dev,
- "Failed to start AP after channel switch\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to start AP after channel switch\n");
return;
}
- dev_notice(priv->adapter->dev,
- "indicating channel switch completion to kernel\n");
+ mwifiex_dbg(priv->adapter, MSG,
+ "indicating channel switch completion to kernel\n");
cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
}
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 433bd6837c79..8422986cd7a9 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -42,7 +42,7 @@ int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
priv->wdev.wiphy->bands[radio_type];
if (WARN_ON_ONCE(!sband)) {
- dev_err(priv->adapter->dev, "Invalid radio type!\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Invalid radio type!\n");
return -EINVAL;
}
@@ -184,7 +184,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr);
if (tx_ba_tbl) {
- dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
+ mwifiex_dbg(priv->adapter, EVENT, "info: BA stream complete\n");
tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
priv->add_ba_param.tx_amsdu &&
@@ -197,7 +197,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
ra_list->ba_status = BA_SETUP_COMPLETE;
}
} else {
- dev_err(priv->adapter->dev, "BA stream not created\n");
+ mwifiex_dbg(priv->adapter, ERROR, "BA stream not created\n");
}
return 0;
@@ -224,7 +224,8 @@ int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
tx_buf->action = cpu_to_le16(action);
switch (action) {
case HostCmd_ACT_GEN_SET:
- dev_dbg(priv->adapter->dev, "cmd: set tx_buf=%d\n", *buf_size);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: set tx_buf=%d\n", *buf_size);
tx_buf->buff_size = cpu_to_le16(*buf_size);
break;
case HostCmd_ACT_GEN_GET:
@@ -466,7 +467,8 @@ void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
mwifiex_is_tx_ba_stream_ptr_valid(priv, tx_ba_tsr_tbl))
return;
- dev_dbg(priv->adapter->dev, "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl);
list_del(&tx_ba_tsr_tbl->list);
@@ -563,7 +565,7 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
unsigned long flags;
u16 block_ack_param_set;
- dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
+ mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid);
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
@@ -575,9 +577,9 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
if (!sta_ptr) {
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- dev_warn(priv->adapter->dev,
- "BA setup with unknown TDLS peer %pM!\n",
- peer_mac);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "BA setup with unknown TDLS peer %pM!\n",
+ peer_mac);
return -1;
}
if (sta_ptr->is_11ac_enabled)
@@ -706,8 +708,8 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
rx_reo_tbl->tid = (u16) tx_ba_tsr_tbl->tid;
- dev_dbg(priv->adapter->dev, "data: %s tid=%d\n",
- __func__, rx_reo_tbl->tid);
+ mwifiex_dbg(priv->adapter, DATA, "data: %s tid=%d\n",
+ __func__, rx_reo_tbl->tid);
memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN);
rx_reo_tbl->amsdu = tx_ba_tsr_tbl->amsdu;
rx_reo_tbl++;
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 6183e255e62a..f7c717253a66 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -187,7 +187,6 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
GFP_ATOMIC | GFP_DMA);
if (!skb_aggr) {
- dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
return -1;
@@ -297,13 +296,13 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
- dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
break;
case -1:
if (adapter->iface_type != MWIFIEX_PCIE)
adapter->data_sent = false;
- dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
- __func__, ret);
+ mwifiex_dbg(adapter, ERROR, "%s: host_to_card failed: %#x\n",
+ __func__, ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
return 0;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index f75f8acfaca0..39d7a957674c 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -51,8 +51,8 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
rx_skb = __skb_dequeue(&list);
ret = mwifiex_recv_packet(priv, rx_skb);
if (ret == -1)
- dev_err(priv->adapter->dev,
- "Rx of A-MSDU failed");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Rx of A-MSDU failed");
}
return 0;
}
@@ -304,7 +304,7 @@ mwifiex_flush_data(unsigned long context)
if (seq_num < 0)
return;
- dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num);
+ mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
start_win);
@@ -367,8 +367,9 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
}
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- dev_dbg(priv->adapter->dev, "info: last_seq=%d start_win=%d\n",
- last_seq, new_node->start_win);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: last_seq=%d start_win=%d\n",
+ last_seq, new_node->start_win);
if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
last_seq >= new_node->start_win) {
@@ -382,8 +383,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
GFP_KERNEL);
if (!new_node->rx_reorder_ptr) {
kfree((u8 *) new_node);
- dev_err(priv->adapter->dev,
- "%s: failed to alloc reorder_ptr\n", __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: failed to alloc reorder_ptr\n", __func__);
return;
}
@@ -467,9 +468,9 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
cmd_addba_req->peer_mac_addr);
if (!sta_ptr) {
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- dev_warn(priv->adapter->dev,
- "BA setup with unknown TDLS peer %pM!\n",
- cmd_addba_req->peer_mac_addr);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "BA setup with unknown TDLS peer %pM!\n",
+ cmd_addba_req->peer_mac_addr);
return -1;
}
if (sta_ptr->is_11ac_enabled)
@@ -573,14 +574,14 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
}
if (tbl->flags & RXREOR_FORCE_NO_DROP) {
- dev_dbg(priv->adapter->dev,
- "RXREOR_FORCE_NO_DROP when HS is activated\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "RXREOR_FORCE_NO_DROP when HS is activated\n");
tbl->flags &= ~RXREOR_FORCE_NO_DROP;
} else if (init_window_shift && seq_num < start_win &&
seq_num >= tbl->init_win) {
- dev_dbg(priv->adapter->dev,
- "Sender TID sequence number reset %d->%d for SSN %d\n",
- start_win, seq_num, tbl->init_win);
+ mwifiex_dbg(priv->adapter, INFO,
+ "Sender TID sequence number reset %d->%d for SSN %d\n",
+ start_win, seq_num, tbl->init_win);
tbl->start_win = start_win = seq_num;
end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
} else {
@@ -668,23 +669,23 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
else
cleanup_rx_reorder_tbl = (initiator) ? false : true;
- dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n",
- peer_mac, tid, initiator);
+ mwifiex_dbg(priv->adapter, EVENT, "event: DELBA: %pM tid=%d initiator=%d\n",
+ peer_mac, tid, initiator);
if (cleanup_rx_reorder_tbl) {
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
peer_mac);
if (!tbl) {
- dev_dbg(priv->adapter->dev,
- "event: TID, TA not found in table\n");
+ mwifiex_dbg(priv->adapter, EVENT,
+ "event: TID, TA not found in table\n");
return;
}
mwifiex_del_rx_reorder_entry(priv, tbl);
} else {
ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
if (!ptx_tbl) {
- dev_dbg(priv->adapter->dev,
- "event: TID, RA not found in table\n");
+ mwifiex_dbg(priv->adapter, EVENT,
+ "event: TID, RA not found in table\n");
return;
}
ra_list = mwifiex_wmm_get_ralist_node(priv, tid, peer_mac);
@@ -721,8 +722,8 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
* the stream
*/
if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
- dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
- add_ba_rsp->peer_mac_addr, tid);
+ mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
+ add_ba_rsp->peer_mac_addr, tid);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
@@ -746,8 +747,8 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
tbl->amsdu = false;
}
- dev_dbg(priv->adapter->dev,
- "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size);
return 0;
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index aa01c9bc77f9..48edf387683e 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -12,6 +12,7 @@ config MWIFIEX_SDIO
tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897"
depends on MWIFIEX && MMC
select FW_LOADER
+ select WANT_DEV_COREDUMP
---help---
This adds support for wireless adapters based on Marvell
8786/8787/8797/8887/8897 chipsets with SDIO interface.
@@ -23,6 +24,7 @@ config MWIFIEX_PCIE
tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
depends on MWIFIEX && PCI
select FW_LOADER
+ select WANT_DEV_COREDUMP
---help---
This adds support for wireless adapters based on Marvell
8766/8897 chipsets with PCIe interface.
diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README
index 31928caeeed2..2f0f9b5609d0 100644
--- a/drivers/net/wireless/mwifiex/README
+++ b/drivers/net/wireless/mwifiex/README
@@ -230,9 +230,9 @@ getlog
cat getlog
-fw_dump
- This command is used to dump firmware memory into files.
- Separate file will be created for each memory segment.
+device_dump
+ This command is used to dump driver information and firmware memory
+ segments.
Usage:
-	cat fw_dump
+	cat device_dump
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index bf9020ff2d33..4eecedadefbf 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -104,11 +104,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
- wiphy_err(wiphy, "deleting the crypto keys\n");
+ mwifiex_dbg(priv->adapter, ERROR, "deleting the crypto keys\n");
return -EFAULT;
}
- wiphy_dbg(wiphy, "info: crypto keys deleted\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: crypto keys deleted\n");
return 0;
}
@@ -163,7 +163,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
if (!buf || !len) {
- wiphy_err(wiphy, "invalid buffer and length\n");
+ mwifiex_dbg(priv->adapter, ERROR, "invalid buffer and length\n");
return -EFAULT;
}
@@ -172,8 +172,8 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
ieee80211_is_probe_resp(mgmt->frame_control)) {
/* Since we support offload probe resp, we need to skip probe
* resp in AP or GO mode */
- wiphy_dbg(wiphy,
- "info: skip to send probe resp in AP or GO mode\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: skip to send probe resp in AP or GO mode\n");
return 0;
}
@@ -183,7 +183,8 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
pkt_len + sizeof(pkt_len));
if (!skb) {
- wiphy_err(wiphy, "allocate skb failed for management frame\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "allocate skb failed for management frame\n");
return -ENOMEM;
}
@@ -206,7 +207,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
mwifiex_queue_tx_pkt(priv, skb);
- wiphy_dbg(wiphy, "info: management frame transmitted\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: management frame transmitted\n");
return 0;
}
@@ -231,7 +232,7 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
HostCmd_ACT_GEN_SET, 0,
&priv->mgmt_frame_mask, false);
- wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: mgmt frame registered\n");
}
}
@@ -248,13 +249,14 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
int ret;
if (!chan || !cookie) {
- wiphy_err(wiphy, "Invalid parameter for ROC\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Invalid parameter for ROC\n");
return -EINVAL;
}
if (priv->roc_cfg.cookie) {
- wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llx\n",
- priv->roc_cfg.cookie);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ongoing ROC, cookie = 0x%llx\n",
+ priv->roc_cfg.cookie);
return -EBUSY;
}
@@ -269,7 +271,8 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
cfg80211_ready_on_channel(wdev, *cookie, chan,
duration, GFP_ATOMIC);
- wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ROC, cookie = 0x%llx\n", *cookie);
}
return ret;
@@ -298,7 +301,8 @@ mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
- wiphy_dbg(wiphy, "info: cancel ROC, cookie = 0x%llx\n", cookie);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: cancel ROC, cookie = 0x%llx\n", cookie);
}
return ret;
@@ -344,8 +348,8 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
u32 ps_mode;
if (timeout)
- wiphy_dbg(wiphy,
- "info: ignore timeout value for IEEE Power Save\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ignore timeout value for IEEE Power Save\n");
ps_mode = enabled;
@@ -370,7 +374,7 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
priv->wep_key_curr_index = key_index;
} else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index,
NULL, 0)) {
- wiphy_err(wiphy, "set default Tx key index\n");
+ mwifiex_dbg(priv->adapter, ERROR, "set default Tx key index\n");
return -EFAULT;
}
@@ -407,7 +411,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
if (mwifiex_set_encode(priv, params, params->key, params->key_len,
key_index, peer_mac, 0)) {
- wiphy_err(wiphy, "crypto keys added\n");
+ mwifiex_dbg(priv->adapter, ERROR, "crypto keys added\n");
return -EFAULT;
}
@@ -442,7 +446,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
band = mwifiex_band_to_radio_type(adapter->config_bands);
if (!wiphy->bands[band]) {
- wiphy_err(wiphy, "11D: setting domain info in FW\n");
+ mwifiex_dbg(adapter, ERROR,
+ "11D: setting domain info in FW\n");
return -1;
}
@@ -493,7 +498,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
HostCmd_ACT_GEN_SET, 0, NULL, false)) {
- wiphy_err(wiphy, "11D: setting domain info in FW\n");
+		mwifiex_dbg(adapter, ERROR,
+ "11D: setting domain info in FW\n");
return -1;
}
@@ -516,9 +522,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv = mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_ANY);
-
- wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for %c%c\n",
- request->alpha2[0], request->alpha2[1]);
+ mwifiex_dbg(adapter, INFO,
+ "info: cfg80211 regulatory domain callback for %c%c\n",
+ request->alpha2[0], request->alpha2[1]);
switch (request->initiator) {
case NL80211_REGDOM_SET_BY_DRIVER:
@@ -527,8 +533,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
break;
default:
- wiphy_err(wiphy, "unknown regdom initiator: %d\n",
- request->initiator);
+ mwifiex_dbg(adapter, ERROR,
+ "unknown regdom initiator: %d\n",
+ request->initiator);
return;
}
@@ -597,8 +604,8 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
switch (priv->bss_role) {
case MWIFIEX_BSS_ROLE_UAP:
if (priv->bss_started) {
- dev_err(adapter->dev,
- "cannot change wiphy params when bss started");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot change wiphy params when bss started");
return -EINVAL;
}
@@ -622,15 +629,16 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
kfree(bss_cfg);
if (ret) {
- wiphy_err(wiphy, "Failed to set wiphy phy params\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to set wiphy phy params\n");
return ret;
}
break;
case MWIFIEX_BSS_ROLE_STA:
if (priv->media_connected) {
- dev_err(adapter->dev,
- "cannot change wiphy params when connected");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot change wiphy params when connected");
return -EINVAL;
}
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
@@ -724,8 +732,8 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
HostCmd_ACT_GEN_SET, 0,
&priv->mgmt_frame_mask, false)) {
- dev_warn(priv->adapter->dev,
- "could not unregister mgmt frame rx\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not unregister mgmt frame rx\n");
return -1;
}
@@ -789,9 +797,9 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
break;
default:
- dev_err(priv->adapter->dev,
- "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
@@ -824,12 +832,13 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
if (adapter->curr_iface_comb.p2p_intf ==
adapter->iface_limit.p2p_intf) {
- dev_err(adapter->dev,
- "cannot create multiple P2P ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple P2P ifaces\n");
return -1;
}
- dev_dbg(priv->adapter->dev, "%s: changing role to p2p\n", dev->name);
+ mwifiex_dbg(adapter, INFO,
+ "%s: changing role to p2p\n", dev->name);
if (mwifiex_deinit_priv_params(priv))
return -1;
@@ -846,9 +855,9 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
return -EFAULT;
break;
default:
- dev_err(priv->adapter->dev,
- "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
@@ -897,17 +906,17 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
curr_iftype != NL80211_IFTYPE_P2P_GO) &&
(adapter->curr_iface_comb.sta_intf ==
adapter->iface_limit.sta_intf)) {
- dev_err(adapter->dev,
- "cannot create multiple station/adhoc ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple station/adhoc ifaces\n");
return -1;
}
if (type == NL80211_IFTYPE_STATION)
- dev_notice(adapter->dev,
- "%s: changing role to station\n", dev->name);
+ mwifiex_dbg(adapter, INFO,
+ "%s: changing role to station\n", dev->name);
else
- dev_notice(adapter->dev,
- "%s: changing role to adhoc\n", dev->name);
+ mwifiex_dbg(adapter, INFO,
+ "%s: changing role to adhoc\n", dev->name);
if (mwifiex_deinit_priv_params(priv))
return -1;
@@ -954,12 +963,13 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
if (adapter->curr_iface_comb.uap_intf ==
adapter->iface_limit.uap_intf) {
- dev_err(adapter->dev,
- "cannot create multiple AP ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple AP ifaces\n");
return -1;
}
- dev_notice(adapter->dev, "%s: changing role to AP\n", dev->name);
+ mwifiex_dbg(adapter, INFO,
+ "%s: changing role to AP\n", dev->name);
if (mwifiex_deinit_priv_params(priv))
return -1;
@@ -1020,12 +1030,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
flags, params);
case NL80211_IFTYPE_UNSPECIFIED:
- wiphy_warn(wiphy, "%s: kept type as IBSS\n", dev->name);
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: kept type as IBSS\n", dev->name);
case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */
return 0;
default:
- wiphy_err(wiphy, "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
break;
@@ -1048,12 +1060,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
flags, params);
case NL80211_IFTYPE_UNSPECIFIED:
- wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: kept type as STA\n", dev->name);
case NL80211_IFTYPE_STATION: /* This shouldn't happen */
return 0;
default:
- wiphy_err(wiphy, "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
break;
@@ -1070,12 +1084,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return mwifiex_change_vif_to_p2p(dev, curr_iftype,
type, flags, params);
case NL80211_IFTYPE_UNSPECIFIED:
- wiphy_warn(wiphy, "%s: kept type as AP\n", dev->name);
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: kept type as AP\n", dev->name);
case NL80211_IFTYPE_AP: /* This shouldn't happen */
return 0;
default:
- wiphy_err(wiphy, "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
break;
@@ -1100,19 +1116,22 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
flags, params);
case NL80211_IFTYPE_UNSPECIFIED:
- wiphy_warn(wiphy, "%s: kept type as P2P\n", dev->name);
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: kept type as P2P\n", dev->name);
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
return 0;
default:
- wiphy_err(wiphy, "%s: changing to %d not supported\n",
- dev->name, type);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: changing to %d not supported\n",
+ dev->name, type);
return -EOPNOTSUPP;
}
break;
default:
- wiphy_err(wiphy, "%s: unknown iftype: %d\n",
- dev->name, dev->ieee80211_ptr->iftype);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: unknown iftype: %d\n",
+ dev->name, dev->ieee80211_ptr->iftype);
return -EOPNOTSUPP;
}
@@ -1206,12 +1225,14 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
/* Get signal information from the firmware */
if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
HostCmd_ACT_GEN_GET, 0, NULL, true)) {
- dev_err(priv->adapter->dev, "failed to get signal information\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "failed to get signal information\n");
return -EFAULT;
}
if (mwifiex_drv_get_data_rate(priv, &rate)) {
- dev_err(priv->adapter->dev, "getting data rate\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+			    "failed to get data rate\n");
return -EFAULT;
}
@@ -1295,7 +1316,7 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats;
enum ieee80211_band band;
- dev_dbg(priv->adapter->dev, "dump_survey idx=%d\n", idx);
+ mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx);
memset(survey, 0, sizeof(struct survey_info));
@@ -1472,8 +1493,8 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
struct mwifiex_adapter *adapter = priv->adapter;
if (!priv->media_connected) {
- dev_err(adapter->dev,
- "Can not set Tx data rate in disconnected state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Can not set Tx data rate in disconnected state\n");
return -EINVAL;
}
@@ -1556,17 +1577,20 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
- wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: bss_type mismatched\n", __func__);
return -EINVAL;
}
if (!priv->bss_started) {
- wiphy_err(wiphy, "%s: bss not started\n", __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: bss not started\n", __func__);
return -EINVAL;
}
if (mwifiex_set_mgmt_ies(priv, data)) {
- wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: setting mgmt ies failed\n", __func__);
return -EFAULT;
}
@@ -1594,7 +1618,8 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
if (!params->mac || is_broadcast_ether_addr(params->mac))
return 0;
- wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac);
+ mwifiex_dbg(priv->adapter, INFO, "%s: mac address %pM\n",
+ __func__, params->mac);
eth_zero_addr(deauth_mac);
@@ -1687,14 +1712,16 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
mwifiex_abort_cac(priv);
if (mwifiex_del_mgmt_ies(priv))
- wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to delete mgmt IEs!\n");
priv->ap_11n_enabled = 0;
memset(&priv->bss_cfg, 0, sizeof(priv->bss_cfg));
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
HostCmd_ACT_GEN_SET, 0, NULL, true)) {
- wiphy_err(wiphy, "Failed to stop the BSS\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to stop the BSS\n");
return -1;
}
@@ -1756,7 +1783,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
kfree(bss_cfg);
- wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+			    "Failed to parse security parameters!\n");
return -1;
}
@@ -1778,17 +1806,19 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
if (mwifiex_is_11h_active(priv) &&
!cfg80211_chandef_dfs_required(wiphy, &params->chandef,
priv->bss_mode)) {
- dev_dbg(priv->adapter->dev, "Disable 11h extensions in FW\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "Disable 11h extensions in FW\n");
if (mwifiex_11h_activate(priv, false)) {
- dev_err(priv->adapter->dev,
- "Failed to disable 11h extensions!!");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to disable 11h extensions!!");
return -1;
}
priv->state_11h.is_11h_active = true;
}
if (mwifiex_config_start_uap(priv, bss_cfg)) {
- wiphy_err(wiphy, "Failed to start AP\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to start AP\n");
kfree(bss_cfg);
return -1;
}
@@ -1816,8 +1846,9 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
- wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
- " reason code %d\n", priv->cfg_bssid, reason_code);
+ mwifiex_dbg(priv->adapter, MSG,
+ "info: successfully disconnected from %pM:\t"
+ "reason code %d\n", priv->cfg_bssid, reason_code);
eth_zero_addr(priv->cfg_bssid);
priv->hs2_enabled = false;
@@ -1899,13 +1930,13 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
req_ssid.ssid_len = ssid_len;
if (ssid_len > IEEE80211_MAX_SSID_LEN) {
- dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
+ mwifiex_dbg(priv->adapter, ERROR, "invalid SSID - aborting\n");
return -EINVAL;
}
memcpy(req_ssid.ssid, ssid, ssid_len);
if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) {
- dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
+ mwifiex_dbg(priv->adapter, ERROR, "invalid SSID - aborting\n");
return -EINVAL;
}
@@ -1959,9 +1990,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
if (sme->key) {
if (mwifiex_is_alg_wep(priv->sec_info.encryption_mode)) {
- dev_dbg(priv->adapter->dev,
- "info: setting wep encryption"
- " with key len %d\n", sme->key_len);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: setting wep encryption\t"
+ "with key len %d\n", sme->key_len);
priv->wep_key_curr_index = sme->key_idx;
ret = mwifiex_set_encode(priv, NULL, sme->key,
sme->key_len, sme->key_idx,
@@ -1978,7 +2009,7 @@ done:
if (is_scanning_required) {
/* Do specific SSID scanning */
if (mwifiex_request_scan(priv, &req_ssid)) {
- dev_err(priv->adapter->dev, "scan error\n");
+ mwifiex_dbg(priv->adapter, ERROR, "scan error\n");
return -EFAULT;
}
}
@@ -1997,15 +2028,15 @@ done:
if (!bss) {
if (is_scanning_required) {
- dev_warn(priv->adapter->dev,
- "assoc: requested bss not found in scan results\n");
+ mwifiex_dbg(priv->adapter, WARN,
+ "assoc: requested bss not found in scan results\n");
break;
}
is_scanning_required = 1;
} else {
- dev_dbg(priv->adapter->dev,
- "info: trying to associate to '%s' bssid %pM\n",
- (char *) req_ssid.ssid, bss->bssid);
+ mwifiex_dbg(priv->adapter, MSG,
+ "info: trying to associate to '%s' bssid %pM\n",
+ (char *)req_ssid.ssid, bss->bssid);
memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
break;
}
@@ -2041,26 +2072,29 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
int ret;
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
- wiphy_err(wiphy,
- "%s: reject infra assoc request in non-STA role\n",
- dev->name);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: reject infra assoc request in non-STA role\n",
+ dev->name);
return -EINVAL;
}
if (priv->wdev.current_bss) {
- wiphy_warn(wiphy, "%s: already connected\n", dev->name);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: already connected\n", dev->name);
return -EALREADY;
}
if (adapter->surprise_removed || adapter->is_cmd_timedout) {
- wiphy_err(wiphy,
- "%s: Ignore connection. Card removed or FW in bad state\n",
- dev->name);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: Ignore connection.\t"
+ "Card removed or FW in bad state\n",
+ dev->name);
return -EFAULT;
}
- wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
- (char *) sme->ssid, sme->bssid);
+ mwifiex_dbg(adapter, INFO,
+ "info: Trying to associate to %s and bssid %pM\n",
+ (char *)sme->ssid, sme->bssid);
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
@@ -2068,17 +2102,17 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
NULL, 0, WLAN_STATUS_SUCCESS,
GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: associated to bssid %pM successfully\n",
- priv->cfg_bssid);
+ mwifiex_dbg(priv->adapter, MSG,
+ "info: associated to bssid %pM successfully\n",
+ priv->cfg_bssid);
if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
priv->adapter->auto_tdls &&
priv->bss_type == MWIFIEX_BSS_TYPE_STA)
mwifiex_setup_auto_tdls_timer(priv);
} else {
- dev_dbg(priv->adapter->dev,
- "info: association to bssid %pM failed\n",
- priv->cfg_bssid);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "info: association to bssid %pM failed\n",
+ priv->cfg_bssid);
eth_zero_addr(priv->cfg_bssid);
if (ret > 0)
@@ -2105,7 +2139,6 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
struct cfg80211_ibss_params *params)
{
- struct wiphy *wiphy = priv->wdev.wiphy;
struct mwifiex_adapter *adapter = priv->adapter;
int index = 0, i;
u8 config_bands = 0;
@@ -2162,8 +2195,10 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
priv->adhoc_channel = ieee80211_frequency_to_channel(
params->chandef.chan->center_freq);
- wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n",
- config_bands, priv->adhoc_channel, adapter->sec_chan_offset);
+ mwifiex_dbg(adapter, INFO,
+ "info: set ibss band %d, chan %d, chan offset %d\n",
+ config_bands, priv->adhoc_channel,
+ adapter->sec_chan_offset);
return 0;
}
@@ -2182,13 +2217,15 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
int ret = 0;
if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
- wiphy_err(wiphy, "request to join ibss received "
- "when station is not in ibss mode\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "request to join ibss received\t"
+ "when station is not in ibss mode\n");
goto done;
}
- wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
- (char *) params->ssid, params->bssid);
+ mwifiex_dbg(priv->adapter, MSG,
+ "info: trying to join to %s and bssid %pM\n",
+ (char *)params->ssid, params->bssid);
mwifiex_set_ibss_params(priv, params);
@@ -2200,12 +2237,12 @@ done:
if (!ret) {
cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
params->chandef.chan, GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: joined/created adhoc network with bssid"
- " %pM successfully\n", priv->cfg_bssid);
+ mwifiex_dbg(priv->adapter, MSG,
+ "info: joined/created adhoc network with bssid\t"
+ "%pM successfully\n", priv->cfg_bssid);
} else {
- dev_dbg(priv->adapter->dev,
- "info: failed creating/joining adhoc network\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "info: failed creating/joining adhoc network\n");
}
return ret;
@@ -2222,8 +2259,8 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
- priv->cfg_bssid);
+ mwifiex_dbg(priv->adapter, MSG, "info: disconnecting from essid %pM\n",
+ priv->cfg_bssid);
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
@@ -2250,13 +2287,15 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
struct ieee_types_header *ie;
struct mwifiex_user_scan_cfg *user_scan_cfg;
- wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
+ mwifiex_dbg(priv->adapter, CMD,
+ "info: received scan request on %s\n", dev->name);
/* Block scan request if scan operation or scan cleanup when interface
* is disabled is in process
*/
if (priv->scan_request || priv->scan_aborting) {
- dev_err(priv->adapter->dev, "cmd: Scan already in process..\n");
+ mwifiex_dbg(priv->adapter, WARN,
+ "cmd: Scan already in process..\n");
return -EBUSY;
}
@@ -2308,7 +2347,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
ret = mwifiex_scan_networks(priv, user_scan_cfg);
kfree(user_scan_cfg);
if (ret) {
- dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "scan failed: %d\n", ret);
priv->scan_aborting = false;
priv->scan_request = NULL;
return ret;
@@ -2454,15 +2494,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_ADHOC:
if (adapter->curr_iface_comb.sta_intf ==
adapter->iface_limit.sta_intf) {
- wiphy_err(wiphy,
- "cannot create multiple sta/adhoc ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple sta/adhoc ifaces\n");
return ERR_PTR(-EINVAL);
}
priv = mwifiex_get_unused_priv(adapter);
if (!priv) {
- wiphy_err(wiphy,
- "could not get free private struct\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not get free private struct\n");
return ERR_PTR(-EFAULT);
}
@@ -2484,15 +2524,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_AP:
if (adapter->curr_iface_comb.uap_intf ==
adapter->iface_limit.uap_intf) {
- wiphy_err(wiphy,
- "cannot create multiple AP ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple AP ifaces\n");
return ERR_PTR(-EINVAL);
}
priv = mwifiex_get_unused_priv(adapter);
if (!priv) {
- wiphy_err(wiphy,
- "could not get free private struct\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not get free private struct\n");
return ERR_PTR(-EFAULT);
}
@@ -2511,15 +2551,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_CLIENT:
if (adapter->curr_iface_comb.p2p_intf ==
adapter->iface_limit.p2p_intf) {
- wiphy_err(wiphy,
- "cannot create multiple P2P ifaces\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create multiple P2P ifaces\n");
return ERR_PTR(-EINVAL);
}
priv = mwifiex_get_unused_priv(adapter);
if (!priv) {
- wiphy_err(wiphy,
- "could not get free private struct\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not get free private struct\n");
return ERR_PTR(-EFAULT);
}
@@ -2550,7 +2590,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
break;
default:
- wiphy_err(wiphy, "type not supported\n");
+ mwifiex_dbg(adapter, ERROR, "type not supported\n");
return ERR_PTR(-EINVAL);
}
@@ -2558,7 +2598,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
name_assign_type, ether_setup,
IEEE80211_NUM_ACS, 1);
if (!dev) {
- wiphy_err(wiphy, "no memory available for netdevice\n");
+ mwifiex_dbg(adapter, ERROR,
+ "no memory available for netdevice\n");
memset(&priv->wdev, 0, sizeof(priv->wdev));
priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2599,7 +2640,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
/* Register network device */
if (register_netdevice(dev)) {
- wiphy_err(wiphy, "cannot register virtual network device\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot register virtual network device\n");
free_netdev(dev);
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->netdev = NULL;
@@ -2613,7 +2655,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
WQ_MEM_RECLAIM |
WQ_UNBOUND, 1, name);
if (!priv->dfs_cac_workqueue) {
- wiphy_err(wiphy, "cannot register virtual network device\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot register virtual network device\n");
free_netdev(dev);
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->netdev = NULL;
@@ -2628,7 +2671,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM, 1, name);
if (!priv->dfs_chan_sw_workqueue) {
- wiphy_err(wiphy, "cannot register virtual network device\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot register virtual network device\n");
free_netdev(dev);
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->netdev = NULL;
@@ -2642,7 +2686,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
sema_init(&priv->async_sem, 1);
- dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: Marvell 802.11 Adapter\n", dev->name);
#ifdef CONFIG_DEBUG_FS
mwifiex_dev_debugfs_init(priv);
@@ -2661,7 +2706,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
adapter->curr_iface_comb.p2p_intf++;
break;
default:
- wiphy_err(wiphy, "type not supported\n");
+ mwifiex_dbg(adapter, ERROR, "type not supported\n");
return ERR_PTR(-EINVAL);
}
@@ -2721,7 +2766,8 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
adapter->curr_iface_comb.p2p_intf++;
break;
default:
- dev_err(adapter->dev, "del_virtual_intf: type not supported\n");
+ mwifiex_dbg(adapter, ERROR,
+ "del_virtual_intf: type not supported\n");
break;
}
@@ -2839,7 +2885,8 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
byte_seq,
MWIFIEX_MEF_MAX_BYTESEQ)) {
- dev_err(priv->adapter->dev, "Pattern not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Pattern not supported\n");
kfree(mef_entry);
return -EOPNOTSUPP;
}
@@ -2954,21 +3001,22 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
mwifiex_cancel_all_pending_cmd(adapter);
if (!wowlan) {
- dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
+ mwifiex_dbg(adapter, ERROR,
+ "None of the WOWLAN triggers enabled\n");
return 0;
}
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
if (!priv->media_connected) {
- dev_warn(adapter->dev,
- "Can not configure WOWLAN in disconnected state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Can not configure WOWLAN in disconnected state\n");
return 0;
}
ret = mwifiex_set_mef_filter(priv, wowlan);
if (ret) {
- dev_err(adapter->dev, "Failed to set MEF filter\n");
+ mwifiex_dbg(adapter, ERROR, "Failed to set MEF filter\n");
return ret;
}
@@ -2981,7 +3029,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
MWIFIEX_SYNC_CMD, &hs_cfg);
if (ret) {
- dev_err(adapter->dev, "Failed to set HS params\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to set HS params\n");
return ret;
}
}
@@ -3041,7 +3090,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
if (!mwifiex_is_pattern_supported(&crule->patterns[i],
byte_seq,
MWIFIEX_COALESCE_MAX_BYTESEQ)) {
- dev_err(priv->adapter->dev, "Pattern not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Pattern not supported\n");
return -EOPNOTSUPP;
}
@@ -3050,8 +3100,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
pkt_type = mwifiex_get_coalesce_pkt_type(byte_seq);
if (pkt_type && mrule->pkt_type) {
- dev_err(priv->adapter->dev,
- "Multiple packet types not allowed\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Multiple packet types not allowed\n");
return -EOPNOTSUPP;
} else if (pkt_type) {
mrule->pkt_type = pkt_type;
@@ -3074,8 +3124,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
}
if (!mrule->pkt_type) {
- dev_err(priv->adapter->dev,
- "Packet type can not be determined\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Packet type can not be determined\n");
return -EOPNOTSUPP;
}
@@ -3093,8 +3143,8 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
memset(&coalesce_cfg, 0, sizeof(coalesce_cfg));
if (!coalesce) {
- dev_dbg(adapter->dev,
- "Disable coalesce and reset all previous rules\n");
+ mwifiex_dbg(adapter, WARN,
+ "Disable coalesce and reset all previous rules\n");
return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
HostCmd_ACT_GEN_SET, 0,
&coalesce_cfg, true);
@@ -3105,8 +3155,8 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
ret = mwifiex_fill_coalesce_rule_info(priv, &coalesce->rules[i],
&coalesce_cfg.rule[i]);
if (ret) {
- dev_err(priv->adapter->dev,
- "Recheck the patterns provided for rule %d\n",
+ mwifiex_dbg(adapter, ERROR,
+ "Recheck the patterns provided for rule %d\n",
i + 1);
return ret;
}
@@ -3138,9 +3188,9 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
- dev_dbg(priv->adapter->dev,
- "Send TDLS Setup Request to %pM status_code=%d\n", peer,
- status_code);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Setup Request to %pM status_code=%d\n",
+ peer, status_code);
mwifiex_add_auto_tdls_peer(priv, peer);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
@@ -3148,45 +3198,45 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
break;
case WLAN_TDLS_SETUP_RESPONSE:
mwifiex_add_auto_tdls_peer(priv, peer);
- dev_dbg(priv->adapter->dev,
- "Send TDLS Setup Response to %pM status_code=%d\n",
- peer, status_code);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Setup Response to %pM status_code=%d\n",
+ peer, status_code);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
case WLAN_TDLS_SETUP_CONFIRM:
- dev_dbg(priv->adapter->dev,
- "Send TDLS Confirm to %pM status_code=%d\n", peer,
- status_code);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Confirm to %pM status_code=%d\n", peer,
+ status_code);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
case WLAN_TDLS_TEARDOWN:
- dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n",
- peer);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Tear down to %pM\n", peer);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
case WLAN_TDLS_DISCOVERY_REQUEST:
- dev_dbg(priv->adapter->dev,
- "Send TDLS Discovery Request to %pM\n", peer);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Discovery Request to %pM\n", peer);
ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
- dev_dbg(priv->adapter->dev,
- "Send TDLS Discovery Response to %pM\n", peer);
+ mwifiex_dbg(priv->adapter, MSG,
+ "Send TDLS Discovery Response to %pM\n", peer);
ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
dialog_token, status_code,
extra_ies, extra_ies_len);
break;
default:
- dev_warn(priv->adapter->dev,
- "Unknown TDLS mgmt/action frame %pM\n", peer);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Unknown TDLS mgmt/action frame %pM\n", peer);
ret = -EINVAL;
break;
}
@@ -3208,8 +3258,8 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
return -ENOTSUPP;
- dev_dbg(priv->adapter->dev,
- "TDLS peer=%pM, oper=%d\n", peer, action);
+ mwifiex_dbg(priv->adapter, MSG,
+ "TDLS peer=%pM, oper=%d\n", peer, action);
switch (action) {
case NL80211_TDLS_ENABLE_LINK:
@@ -3220,22 +3270,22 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
break;
case NL80211_TDLS_TEARDOWN:
/* shouldn't happen!*/
- dev_warn(priv->adapter->dev,
- "tdls_oper: teardown from driver not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tdls_oper: teardown from driver not supported\n");
return -EINVAL;
case NL80211_TDLS_SETUP:
/* shouldn't happen!*/
- dev_warn(priv->adapter->dev,
- "tdls_oper: setup from driver not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tdls_oper: setup from driver not supported\n");
return -EINVAL;
case NL80211_TDLS_DISCOVERY_REQ:
/* shouldn't happen!*/
- dev_warn(priv->adapter->dev,
- "tdls_oper: discovery from driver not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tdls_oper: discovery from driver not supported\n");
return -EINVAL;
default:
- dev_err(priv->adapter->dev,
- "tdls_oper: operation not supported\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tdls_oper: operation not supported\n");
return -ENOTSUPP;
}
@@ -3268,8 +3318,8 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
if (priv->adapter->scan_processing) {
- dev_err(priv->adapter->dev,
- "radar detection: scan in process...\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "radar detection: scan in process...\n");
return -EBUSY;
}
@@ -3284,8 +3334,8 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
params->beacon_csa.tail,
params->beacon_csa.tail_len);
if (!chsw_ie) {
- dev_err(priv->adapter->dev,
- "Could not parse channel switch announcement IE\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Could not parse channel switch announcement IE\n");
return -EINVAL;
}
@@ -3297,10 +3347,12 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
}
if (mwifiex_del_mgmt_ies(priv))
- wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to delete mgmt IEs!\n");
if (mwifiex_set_mgmt_ies(priv, &params->beacon_csa)) {
- wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: setting mgmt ies failed\n", __func__);
return -EFAULT;
}
@@ -3324,16 +3376,17 @@ mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
struct mwifiex_radar_params radar_params;
if (priv->adapter->scan_processing) {
- dev_err(priv->adapter->dev,
- "radar detection: scan already in process...\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "radar detection: scan already in process...\n");
return -EBUSY;
}
if (!mwifiex_is_11h_active(priv)) {
- dev_dbg(priv->adapter->dev, "Enable 11h extensions in FW\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "Enable 11h extensions in FW\n");
if (mwifiex_11h_activate(priv, true)) {
- dev_err(priv->adapter->dev,
- "Failed to activate 11h extensions!!");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to activate 11h extensions!!");
return -1;
}
priv->state_11h.is_11h_active = true;
@@ -3492,7 +3545,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy = wiphy_new(&mwifiex_cfg80211_ops,
sizeof(struct mwifiex_adapter *));
if (!wiphy) {
- dev_err(adapter->dev, "%s: creating new wiphy\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: creating new wiphy\n", __func__);
return -ENOMEM;
}
wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
@@ -3563,20 +3617,22 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
ret = wiphy_register(wiphy);
if (ret < 0) {
- dev_err(adapter->dev,
- "%s: wiphy_register failed: %d\n", __func__, ret);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: wiphy_register failed: %d\n", __func__, ret);
wiphy_free(wiphy);
return ret;
}
if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
- wiphy_info(wiphy, "driver hint alpha2: %2.2s\n", reg_alpha2);
+ mwifiex_dbg(adapter, INFO,
+ "driver hint alpha2: %2.2s\n", reg_alpha2);
regulatory_hint(wiphy, reg_alpha2);
} else {
country_code = mwifiex_11d_code_2_region(adapter->region_code);
if (country_code)
- wiphy_info(wiphy, "ignoring F/W country code %2.2s\n",
- country_code);
+ mwifiex_dbg(adapter, WARN,
+ "ignoring F/W country code %2.2s\n",
+ country_code);
}
mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
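All of the conversions above funnel through mwifiex_dbg(adapter, LEVEL, ...). As a minimal sketch only — not the driver's actual definition — such a mask-gated logging macro could look like the following; the MWIFIEX_DBG_* bit names and the adapter->debug_mask field are assumptions inferred from the call sites here and from the debugfs 'debug_mask' file added later in this patch:

/* Assumed level bits; actual values/names are illustrative. */
#define MWIFIEX_DBG_MSG		BIT(0)
#define MWIFIEX_DBG_FATAL	BIT(1)
#define MWIFIEX_DBG_ERROR	BIT(2)
#define MWIFIEX_DBG_CMD		BIT(3)
#define MWIFIEX_DBG_EVENT	BIT(4)
#define MWIFIEX_DBG_INFO	BIT(5)
#define MWIFIEX_DBG_WARN	BIT(6)

/* Print only when the corresponding bit is set in adapter->debug_mask. */
#define mwifiex_dbg(adapter, mask, fmt, ...)				\
do {									\
	if ((adapter)->debug_mask & MWIFIEX_DBG_##mask)			\
		dev_info((adapter)->dev, fmt, ##__VA_ARGS__);		\
} while (0)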
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index e9df8826f124..3ddb8ec676ed 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -327,8 +327,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ];
if (!sband) {
- dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d\n",
- __func__, band);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: cannot find cfp by band %d\n",
+ __func__, band);
return cfp;
}
@@ -349,9 +350,10 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
}
}
if (i == sband->n_channels) {
- dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
- " & channel=%d freq=%d\n", __func__, band, channel,
- freq);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: cannot find cfp by band %d\t"
+ "& channel=%d freq=%d\n",
+ __func__, band, channel, freq);
} else {
if (!ch)
return cfp;
@@ -431,15 +433,17 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
switch (adapter->config_bands) {
case BAND_B:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_b\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_b\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_b,
sizeof(supported_rates_b));
break;
case BAND_G:
case BAND_G | BAND_GN:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_g\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_g\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_g,
sizeof(supported_rates_g));
break;
@@ -449,15 +453,17 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC:
case BAND_B | BAND_G | BAND_GN:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_bg\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_bg\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_bg,
sizeof(supported_rates_bg));
break;
case BAND_A:
case BAND_A | BAND_G:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_a\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_a\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_a,
sizeof(supported_rates_a));
break;
@@ -466,14 +472,16 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
case BAND_A | BAND_AN | BAND_AAC:
case BAND_A | BAND_G | BAND_AN | BAND_GN:
case BAND_A | BAND_G | BAND_AN | BAND_GN | BAND_AAC:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_a\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_a\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_a,
sizeof(supported_rates_a));
break;
case BAND_GN:
- dev_dbg(adapter->dev, "info: infra band=%d "
- "supported_rates_n\n", adapter->config_bands);
+ mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+ "supported_rates_n\n",
+ adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_n,
sizeof(supported_rates_n));
break;
@@ -482,25 +490,25 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
/* Ad-hoc mode */
switch (adapter->adhoc_start_band) {
case BAND_B:
- dev_dbg(adapter->dev, "info: adhoc B\n");
+ mwifiex_dbg(adapter, INFO, "info: adhoc B\n");
k = mwifiex_copy_rates(rates, k, adhoc_rates_b,
sizeof(adhoc_rates_b));
break;
case BAND_G:
case BAND_G | BAND_GN:
- dev_dbg(adapter->dev, "info: adhoc G only\n");
+ mwifiex_dbg(adapter, INFO, "info: adhoc G only\n");
k = mwifiex_copy_rates(rates, k, adhoc_rates_g,
sizeof(adhoc_rates_g));
break;
case BAND_B | BAND_G:
case BAND_B | BAND_G | BAND_GN:
- dev_dbg(adapter->dev, "info: adhoc BG\n");
+ mwifiex_dbg(adapter, INFO, "info: adhoc BG\n");
k = mwifiex_copy_rates(rates, k, adhoc_rates_bg,
sizeof(adhoc_rates_bg));
break;
case BAND_A:
case BAND_A | BAND_AN:
- dev_dbg(adapter->dev, "info: adhoc A\n");
+ mwifiex_dbg(adapter, INFO, "info: adhoc A\n");
k = mwifiex_copy_rates(rates, k, adhoc_rates_a,
sizeof(adhoc_rates_a));
break;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index c5a14ff7eb82..a1de83fd1dbe 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -62,7 +62,8 @@ mwifiex_get_cmd_node(struct mwifiex_adapter *adapter)
spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
if (list_empty(&adapter->cmd_free_q)) {
- dev_err(adapter->dev, "GET_CMD_NODE: cmd node not available\n");
+ mwifiex_dbg(adapter, ERROR,
+ "GET_CMD_NODE: cmd node not available\n");
spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
return NULL;
}
@@ -116,7 +117,8 @@ static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv,
{
/* Copy the HOST command to command buffer */
memcpy(cmd, pcmd_ptr->cmd, pcmd_ptr->len);
- dev_dbg(priv->adapter->dev, "cmd: host cmd size = %d\n", pcmd_ptr->len);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: host cmd size = %d\n", pcmd_ptr->len);
return 0;
}
@@ -147,8 +149,9 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
/* Sanity test */
if (host_cmd == NULL || host_cmd->size == 0) {
- dev_err(adapter->dev, "DNLD_CMD: host_cmd is null"
- " or cmd size is 0, not sending\n");
+ mwifiex_dbg(adapter, ERROR,
+ "DNLD_CMD: host_cmd is null\t"
+ "or cmd size is 0, not sending\n");
if (cmd_node->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, cmd_node);
@@ -161,8 +164,8 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET &&
cmd_code != HostCmd_CMD_FUNC_SHUTDOWN &&
cmd_code != HostCmd_CMD_FUNC_INIT) {
- dev_err(adapter->dev,
- "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
+ mwifiex_dbg(adapter, ERROR,
+ "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
cmd_code);
if (cmd_node->wait_q_enabled)
mwifiex_complete_cmd(adapter, cmd_node);
@@ -197,10 +200,12 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
*/
skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len);
- dev_dbg(adapter->dev,
- "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code,
- le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
- le16_to_cpu(host_cmd->seq_num));
+ mwifiex_dbg(adapter, CMD,
+ "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
+ cmd_code,
+ le16_to_cpu(*(__le16 *)((u8 *)host_cmd + S_DS_GEN)),
+ cmd_size, le16_to_cpu(host_cmd->seq_num));
+ mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size);
if (adapter->iface_type == MWIFIEX_USB) {
tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
@@ -221,7 +226,8 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
}
if (ret == -1) {
- dev_err(adapter->dev, "DNLD_CMD: host to card failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "DNLD_CMD: host to card failed\n");
if (adapter->iface_type == MWIFIEX_USB)
adapter->cmd_sent = false;
if (cmd_node->wait_q_enabled)
@@ -280,12 +286,14 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
(adapter->seq_num, priv->bss_num,
priv->bss_type)));
- dev_dbg(adapter->dev,
- "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
+ mwifiex_dbg(adapter, CMD,
+ "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
le16_to_cpu(sleep_cfm_buf->command),
le16_to_cpu(sleep_cfm_buf->action),
le16_to_cpu(sleep_cfm_buf->size),
le16_to_cpu(sleep_cfm_buf->seq_num));
+ mwifiex_dbg_dump(adapter, CMD_D, "SLEEP_CFM buffer: ", sleep_cfm_buf,
+ le16_to_cpu(sleep_cfm_buf->size));
if (adapter->iface_type == MWIFIEX_USB) {
sleep_cfm_tmp =
@@ -311,7 +319,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
}
if (ret == -1) {
- dev_err(adapter->dev, "SLEEP_CFM: failed\n");
+ mwifiex_dbg(adapter, ERROR, "SLEEP_CFM: failed\n");
adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++;
return -1;
}
@@ -362,8 +370,9 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter)
for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER);
if (!cmd_array[i].skb) {
- dev_err(adapter->dev, "ALLOC_CMD_BUF: out of memory\n");
- return -1;
+ mwifiex_dbg(adapter, ERROR,
+ "unable to allocate command buffer\n");
+ return -ENOMEM;
}
}
@@ -386,7 +395,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
/* Need to check if cmd pool is allocated or not */
if (!adapter->cmd_pool) {
- dev_dbg(adapter->dev, "info: FREE_CMD_BUF: cmd_pool is null\n");
+ mwifiex_dbg(adapter, FATAL,
+ "info: FREE_CMD_BUF: cmd_pool is null\n");
return 0;
}
@@ -395,7 +405,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
/* Release shared memory buffers */
for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
if (cmd_array[i].skb) {
- dev_dbg(adapter->dev, "cmd: free cmd buffer %d\n", i);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: free cmd buffer %d\n", i);
dev_kfree_skb_any(cmd_array[i].skb);
}
if (!cmd_array[i].resp_skb)
@@ -409,7 +420,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
}
/* Release struct cmd_ctrl_node */
if (adapter->cmd_pool) {
- dev_dbg(adapter->dev, "cmd: free cmd pool\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: free cmd pool\n");
kfree(adapter->cmd_pool);
adapter->cmd_pool = NULL;
}
@@ -459,7 +471,8 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
rx_info->bss_type = priv->bss_type;
}
- dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause);
+ mwifiex_dbg(adapter, EVENT, "EVENT: cause: %#x\n", eventcause);
+ mwifiex_dbg_dump(adapter, EVT_D, "Event Buf:", skb->data, skb->len);
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
ret = mwifiex_process_uap_event(priv);
@@ -498,28 +511,33 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
}
if (adapter->is_suspended) {
- dev_err(adapter->dev, "PREP_CMD: device in suspended state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: device in suspended state\n");
return -1;
}
if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
- dev_err(adapter->dev, "PREP_CMD: host entering sleep state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: host entering sleep state\n");
return -1;
}
if (adapter->surprise_removed) {
- dev_err(adapter->dev, "PREP_CMD: card is removed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: card is removed\n");
return -1;
}
if (adapter->is_cmd_timedout) {
- dev_err(adapter->dev, "PREP_CMD: FW is in bad state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: FW is in bad state\n");
return -1;
}
if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) {
if (cmd_no != HostCmd_CMD_FUNC_INIT) {
- dev_err(adapter->dev, "PREP_CMD: FW in reset state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: FW in reset state\n");
return -1;
}
}
@@ -528,7 +546,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
cmd_node = mwifiex_get_cmd_node(adapter);
if (!cmd_node) {
- dev_err(adapter->dev, "PREP_CMD: no free cmd node\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: no free cmd node\n");
return -1;
}
@@ -536,7 +555,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync);
if (!cmd_node->cmd_skb) {
- dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: no free cmd buf\n");
return -1;
}
@@ -571,7 +591,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
/* Return error, since the command preparation failed */
if (ret) {
- dev_err(adapter->dev, "PREP_CMD: cmd %#x preparation failed\n",
+ mwifiex_dbg(adapter, ERROR,
+ "PREP_CMD: cmd %#x preparation failed\n",
cmd_no);
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
return -1;
@@ -626,7 +647,8 @@ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
atomic_dec(&adapter->cmd_pending);
- dev_dbg(adapter->dev, "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
+ mwifiex_dbg(adapter, CMD,
+ "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
le16_to_cpu(host_cmd->command),
atomic_read(&adapter->cmd_pending));
}
@@ -648,7 +670,7 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
if (!host_cmd) {
- dev_err(adapter->dev, "QUEUE_CMD: host_cmd is NULL\n");
+ mwifiex_dbg(adapter, ERROR, "QUEUE_CMD: host_cmd is NULL\n");
return;
}
@@ -673,7 +695,8 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
atomic_inc(&adapter->cmd_pending);
- dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n",
+ mwifiex_dbg(adapter, CMD,
+ "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n",
command, atomic_read(&adapter->cmd_pending));
}
@@ -699,7 +722,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
/* Check if already in processing */
if (adapter->curr_cmd) {
- dev_err(adapter->dev, "EXEC_NEXT_CMD: cmd in processing\n");
+ mwifiex_dbg(adapter, FATAL,
+ "EXEC_NEXT_CMD: cmd in processing\n");
return -1;
}
@@ -721,8 +745,9 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
priv = cmd_node->priv;
if (adapter->ps_state != PS_STATE_AWAKE) {
- dev_err(adapter->dev, "%s: cannot send cmd in sleep state,"
- " this should not happen\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: cannot send cmd in sleep state,\t"
+ "this should not happen\n", __func__);
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
return ret;
}
@@ -772,8 +797,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
resp = (struct host_cmd_ds_command *) adapter->upld_buf;
- dev_err(adapter->dev, "CMD_RESP: NULL curr_cmd, %#x\n",
- le16_to_cpu(resp->command));
+ mwifiex_dbg(adapter, ERROR,
+ "CMD_RESP: NULL curr_cmd, %#x\n",
+ le16_to_cpu(resp->command));
return -1;
}
@@ -781,8 +807,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
- dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n",
- le16_to_cpu(resp->command));
+ mwifiex_dbg(adapter, ERROR,
+ "CMD_RESP: %#x been canceled\n",
+ le16_to_cpu(resp->command));
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd = NULL;
@@ -794,7 +821,8 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
/* Copy original response back to response buffer */
struct mwifiex_ds_misc_cmd *hostcmd;
uint16_t size = le16_to_cpu(resp->size);
- dev_dbg(adapter->dev, "info: host cmd resp size = %d\n", size);
+ mwifiex_dbg(adapter, INFO,
+ "info: host cmd resp size = %d\n", size);
size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER);
if (adapter->curr_cmd->data_buf) {
hostcmd = adapter->curr_cmd->data_buf;
@@ -822,13 +850,15 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] =
orig_cmdresp_no;
- dev_dbg(adapter->dev,
- "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
- orig_cmdresp_no, cmdresp_result,
- le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
+ mwifiex_dbg(adapter, CMD,
+ "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
+ orig_cmdresp_no, cmdresp_result,
+ le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
+ mwifiex_dbg_dump(adapter, CMD_D, "CMD_RESP buffer:", resp,
+ le16_to_cpu(resp->size));
if (!(orig_cmdresp_no & HostCmd_RET_BIT)) {
- dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n");
+ mwifiex_dbg(adapter, ERROR, "CMD_RESP: invalid cmd resp\n");
if (adapter->curr_cmd->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
@@ -852,8 +882,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
/* Check init command response */
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
if (ret) {
- dev_err(adapter->dev, "%s: cmd %#x failed during "
- "initialization\n", __func__, cmdresp_no);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: cmd %#x failed during\t"
+ "initialization\n", __func__, cmdresp_no);
mwifiex_init_fw_complete(adapter);
return -1;
} else if (adapter->last_init_cmd == cmdresp_no)
@@ -888,7 +919,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
adapter->is_cmd_timedout = 1;
if (!adapter->curr_cmd) {
- dev_dbg(adapter->dev, "cmd: empty curr_cmd\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cmd: empty curr_cmd\n");
return;
}
cmd_node = adapter->curr_cmd;
@@ -897,47 +929,60 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
adapter->dbg.timeout_cmd_act =
adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index];
- dev_err(adapter->dev,
- "%s: Timeout cmd id = %#x, act = %#x\n", __func__,
- adapter->dbg.timeout_cmd_id,
- adapter->dbg.timeout_cmd_act);
-
- dev_err(adapter->dev, "num_data_h2c_failure = %d\n",
- adapter->dbg.num_tx_host_to_card_failure);
- dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
- adapter->dbg.num_cmd_host_to_card_failure);
-
- dev_err(adapter->dev, "is_cmd_timedout = %d\n",
- adapter->is_cmd_timedout);
- dev_err(adapter->dev, "num_tx_timeout = %d\n",
- adapter->dbg.num_tx_timeout);
-
- dev_err(adapter->dev, "last_cmd_index = %d\n",
- adapter->dbg.last_cmd_index);
- dev_err(adapter->dev, "last_cmd_id: %*ph\n",
- (int)sizeof(adapter->dbg.last_cmd_id),
- adapter->dbg.last_cmd_id);
- dev_err(adapter->dev, "last_cmd_act: %*ph\n",
- (int)sizeof(adapter->dbg.last_cmd_act),
- adapter->dbg.last_cmd_act);
-
- dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
- adapter->dbg.last_cmd_resp_index);
- dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n",
- (int)sizeof(adapter->dbg.last_cmd_resp_id),
- adapter->dbg.last_cmd_resp_id);
-
- dev_err(adapter->dev, "last_event_index = %d\n",
- adapter->dbg.last_event_index);
- dev_err(adapter->dev, "last_event: %*ph\n",
- (int)sizeof(adapter->dbg.last_event),
- adapter->dbg.last_event);
-
- dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
- adapter->data_sent, adapter->cmd_sent);
-
- dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
- adapter->ps_mode, adapter->ps_state);
+ mwifiex_dbg(adapter, MSG,
+ "%s: Timeout cmd id = %#x, act = %#x\n", __func__,
+ adapter->dbg.timeout_cmd_id,
+ adapter->dbg.timeout_cmd_act);
+
+ mwifiex_dbg(adapter, MSG,
+ "num_data_h2c_failure = %d\n",
+ adapter->dbg.num_tx_host_to_card_failure);
+ mwifiex_dbg(adapter, MSG,
+ "num_cmd_h2c_failure = %d\n",
+ adapter->dbg.num_cmd_host_to_card_failure);
+
+ mwifiex_dbg(adapter, MSG,
+ "is_cmd_timedout = %d\n",
+ adapter->is_cmd_timedout);
+ mwifiex_dbg(adapter, MSG,
+ "num_tx_timeout = %d\n",
+ adapter->dbg.num_tx_timeout);
+
+ mwifiex_dbg(adapter, MSG,
+ "last_cmd_index = %d\n",
+ adapter->dbg.last_cmd_index);
+ mwifiex_dbg(adapter, MSG,
+ "last_cmd_id: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_id),
+ adapter->dbg.last_cmd_id);
+ mwifiex_dbg(adapter, MSG,
+ "last_cmd_act: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_act),
+ adapter->dbg.last_cmd_act);
+
+ mwifiex_dbg(adapter, MSG,
+ "last_cmd_resp_index = %d\n",
+ adapter->dbg.last_cmd_resp_index);
+ mwifiex_dbg(adapter, MSG,
+ "last_cmd_resp_id: %*ph\n",
+ (int)sizeof(adapter->dbg.last_cmd_resp_id),
+ adapter->dbg.last_cmd_resp_id);
+
+ mwifiex_dbg(adapter, MSG,
+ "last_event_index = %d\n",
+ adapter->dbg.last_event_index);
+ mwifiex_dbg(adapter, MSG,
+ "last_event: %*ph\n",
+ (int)sizeof(adapter->dbg.last_event),
+ adapter->dbg.last_event);
+
+ mwifiex_dbg(adapter, MSG,
+ "data_sent=%d cmd_sent=%d\n",
+ adapter->data_sent, adapter->cmd_sent);
+
+ mwifiex_dbg(adapter, MSG,
+ "ps_mode=%d ps_state=%d\n",
+ adapter->ps_mode, adapter->ps_state);
if (cmd_node->wait_q_enabled) {
adapter->cmd_wait_q.status = -ETIMEDOUT;
@@ -948,8 +993,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
mwifiex_init_fw_complete(adapter);
- if (adapter->if_ops.fw_dump)
- adapter->if_ops.fw_dump(adapter);
+ if (adapter->if_ops.device_dump)
+ adapter->if_ops.device_dump(adapter);
if (adapter->if_ops.card_reset)
adapter->if_ops.card_reset(adapter);
@@ -1015,7 +1060,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
if (!priv)
continue;
if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: aborting scan\n");
+ mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
cfg80211_scan_done(priv->scan_request, 1);
priv->scan_request = NULL;
}
@@ -1075,7 +1120,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
if (!priv)
continue;
if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: aborting scan\n");
+ mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
cfg80211_scan_done(priv->scan_request, 1);
priv->scan_request = NULL;
}
@@ -1100,11 +1145,11 @@ mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
!adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter))
mwifiex_dnld_sleep_confirm_cmd(adapter);
else
- dev_dbg(adapter->dev,
- "cmd: Delay Sleep Confirm (%s%s%s)\n",
- (adapter->cmd_sent) ? "D" : "",
- (adapter->curr_cmd) ? "C" : "",
- (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Delay Sleep Confirm (%s%s%s)\n",
+ (adapter->cmd_sent) ? "D" : "",
+ (adapter->curr_cmd) ? "C" : "",
+ (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
}
/*
@@ -1120,15 +1165,18 @@ mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
priv->adapter->hs_activated = true;
mwifiex_update_rxreor_flags(priv->adapter,
RXREOR_FORCE_NO_DROP);
- dev_dbg(priv->adapter->dev, "event: hs_activated\n");
+ mwifiex_dbg(priv->adapter, EVENT,
+ "event: hs_activated\n");
priv->adapter->hs_activate_wait_q_woken = true;
wake_up_interruptible(
&priv->adapter->hs_activate_wait_q);
} else {
- dev_dbg(priv->adapter->dev, "event: HS not configured\n");
+ mwifiex_dbg(priv->adapter, EVENT,
+ "event: HS not configured\n");
}
} else {
- dev_dbg(priv->adapter->dev, "event: hs_deactivated\n");
+ mwifiex_dbg(priv->adapter, EVENT,
+ "event: hs_deactivated\n");
priv->adapter->hs_activated = false;
}
}
@@ -1156,11 +1204,12 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
mwifiex_hs_activated_event(priv, true);
return 0;
} else {
- dev_dbg(adapter->dev, "cmd: CMD_RESP: HS_CFG cmd reply"
- " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
- resp->result, conditions,
- phs_cfg->params.hs_config.gpio,
- phs_cfg->params.hs_config.gap);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: CMD_RESP: HS_CFG cmd reply\t"
+ " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
+ resp->result, conditions,
+ phs_cfg->params.hs_config.gpio,
+ phs_cfg->params.hs_config.gap);
}
if (conditions != HS_CFG_CANCEL) {
adapter->is_hs_configured = true;
@@ -1182,8 +1231,10 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
void
mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
{
- dev_dbg(adapter->dev, "info: %s: auto cancelling host sleep"
- " since there is interrupt from the firmware\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: auto cancelling host sleep\t"
+ "since there is interrupt from the firmware\n",
+ __func__);
adapter->if_ops.wakeup(adapter);
adapter->hs_activated = false;
@@ -1212,13 +1263,14 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
uint16_t seq_num = le16_to_cpu(cmd->seq_num);
if (!upld_len) {
- dev_err(adapter->dev, "%s: cmd size is 0\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: cmd size is 0\n", __func__);
return;
}
- dev_dbg(adapter->dev,
- "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
- command, result, le16_to_cpu(cmd->size), seq_num);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
+ command, result, le16_to_cpu(cmd->size), seq_num);
/* Get BSS number and corresponding priv */
priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
@@ -1232,15 +1284,16 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
command &= HostCmd_CMD_ID_MASK;
if (command != HostCmd_CMD_802_11_PS_MODE_ENH) {
- dev_err(adapter->dev,
- "%s: rcvd unexpected resp for cmd %#x, result = %x\n",
- __func__, command, result);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: rcvd unexpected resp for cmd %#x, result = %x\n",
+ __func__, command, result);
return;
}
if (result) {
- dev_err(adapter->dev, "%s: sleep confirm cmd failed\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: sleep confirm cmd failed\n",
+ __func__);
adapter->pm_wakeup_card_req = false;
adapter->ps_state = PS_STATE_AWAKE;
return;
@@ -1305,7 +1358,8 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie_types_header));
cmd_size += sizeof(*ps_tlv);
tlv += sizeof(*ps_tlv);
- dev_dbg(adapter->dev, "cmd: PS Command: Enter PS\n");
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: PS Command: Enter PS\n");
ps_mode->null_pkt_interval =
cpu_to_le16(adapter->null_pkt_interval);
ps_mode->multiple_dtims =
@@ -1335,8 +1389,8 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
tlv += sizeof(*auto_ds_tlv);
if (auto_ds)
idletime = auto_ds->idle_time;
- dev_dbg(priv->adapter->dev,
- "cmd: PS Command: Enter Auto Deep Sleep\n");
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: PS Command: Enter Auto Deep Sleep\n");
auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime);
}
cmd->size = cpu_to_le16(cmd_size);
@@ -1363,27 +1417,31 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
uint16_t auto_ps_bitmap =
le16_to_cpu(ps_mode->params.ps_bitmap);
- dev_dbg(adapter->dev,
- "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
- __func__, resp->result, action);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
+ __func__, resp->result, action);
if (action == EN_AUTO_PS) {
if (auto_ps_bitmap & BITMAP_AUTO_DS) {
- dev_dbg(adapter->dev, "cmd: Enabled auto deep sleep\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Enabled auto deep sleep\n");
priv->adapter->is_deep_sleep = true;
}
if (auto_ps_bitmap & BITMAP_STA_PS) {
- dev_dbg(adapter->dev, "cmd: Enabled STA power save\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Enabled STA power save\n");
if (adapter->sleep_period.period)
- dev_dbg(adapter->dev,
- "cmd: set to uapsd/pps mode\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: set to uapsd/pps mode\n");
}
} else if (action == DIS_AUTO_PS) {
if (ps_bitmap & BITMAP_AUTO_DS) {
priv->adapter->is_deep_sleep = false;
- dev_dbg(adapter->dev, "cmd: Disabled auto deep sleep\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Disabled auto deep sleep\n");
}
if (ps_bitmap & BITMAP_STA_PS) {
- dev_dbg(adapter->dev, "cmd: Disabled STA power save\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Disabled STA power save\n");
if (adapter->sleep_period.period) {
adapter->delay_null_pkt = false;
adapter->tx_lock_flag = false;
@@ -1396,7 +1454,8 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
else
adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
- dev_dbg(adapter->dev, "cmd: ps_bitmap=%#x\n", ps_bitmap);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: ps_bitmap=%#x\n", ps_bitmap);
if (pm_cfg) {
/* This section is for get power save mode */
@@ -1533,29 +1592,29 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
api_rev->major_ver;
adapter->key_api_minor_ver =
api_rev->minor_ver;
- dev_dbg(adapter->dev,
- "key_api v%d.%d\n",
- adapter->key_api_major_ver,
- adapter->key_api_minor_ver);
+ mwifiex_dbg(adapter, INFO,
+ "key_api v%d.%d\n",
+ adapter->key_api_major_ver,
+ adapter->key_api_minor_ver);
break;
case FW_API_VER_ID:
adapter->fw_api_ver =
api_rev->major_ver;
- dev_dbg(adapter->dev,
- "Firmware api version %d\n",
- adapter->fw_api_ver);
+ mwifiex_dbg(adapter, INFO,
+ "Firmware api version %d\n",
+ adapter->fw_api_ver);
break;
default:
- dev_warn(adapter->dev,
- "Unknown api_id: %d\n",
- api_id);
+ mwifiex_dbg(adapter, FATAL,
+ "Unknown api_id: %d\n",
+ api_id);
break;
}
break;
default:
- dev_warn(adapter->dev,
- "Unknown GET_HW_SPEC TLV type: %#x\n",
- le16_to_cpu(tlv->type));
+ mwifiex_dbg(adapter, FATAL,
+ "Unknown GET_HW_SPEC TLV type: %#x\n",
+ le16_to_cpu(tlv->type));
break;
}
parsed_len += le16_to_cpu(tlv->len) +
@@ -1565,14 +1624,16 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
}
}
- dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
- adapter->fw_release_number);
- dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
- hw_spec->permanent_addr);
- dev_dbg(adapter->dev,
- "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
- le16_to_cpu(hw_spec->hw_if_version),
- le16_to_cpu(hw_spec->version));
+ mwifiex_dbg(adapter, INFO,
+ "info: GET_HW_SPEC: fw_release_number- %#x\n",
+ adapter->fw_release_number);
+ mwifiex_dbg(adapter, INFO,
+ "info: GET_HW_SPEC: permanent addr: %pM\n",
+ hw_spec->permanent_addr);
+ mwifiex_dbg(adapter, INFO,
+ "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
+ le16_to_cpu(hw_spec->hw_if_version),
+ le16_to_cpu(hw_spec->version));
ether_addr_copy(priv->adapter->perm_addr, hw_spec->permanent_addr);
adapter->region_code = le16_to_cpu(hw_spec->region_code);
@@ -1585,8 +1646,8 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
/* If it's unidentified region code, use the default (USA) */
if (i >= MWIFIEX_MAX_REGION_CODE) {
adapter->region_code = 0x10;
- dev_dbg(adapter->dev,
- "cmd: unknown region code, use default (USA)\n");
+ mwifiex_dbg(adapter, WARN,
+ "cmd: unknown region code, use default (USA)\n");
}
adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
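The new mwifiex_dbg_dump() calls above (CMD_D for command buffers, EVT_D for event buffers) add raw hex dumps alongside the one-line traces. A plausible sketch, assuming the same mask-gating scheme and the kernel's print_hex_dump() helper; the MWIFIEX_DBG_CMD_D/EVT_D bit names and positions are assumptions mirroring the call sites:

#define MWIFIEX_DBG_CMD_D	BIT(16)	/* assumed bit positions */
#define MWIFIEX_DBG_EVT_D	BIT(17)

/* Hex-dump a buffer only when its dump bit is enabled in debug_mask. */
#define mwifiex_dbg_dump(adapter, mask, str, buf, len)			\
do {									\
	if ((adapter)->debug_mask & MWIFIEX_DBG_##mask)			\
		print_hex_dump(KERN_DEBUG, str, DUMP_PREFIX_OFFSET,	\
			       16, 1, buf, len, false);			\
} while (0)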
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 1fb329dc6744..5a0636d43a1b 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -152,24 +152,24 @@ free_and_exit:
}
/*
- * Proc firmware dump read handler.
+ * Proc device dump read handler.
*
- * This function is called when the 'fw_dump' file is opened for
+ * This function is called when the 'device_dump' file is opened for
* reading.
- * This function dumps firmware memory in different files
- * (ex. DTCM, ITCM, SQRAM etc.) based on the the segments for
+ * This function dumps driver information and firmware memory segments
+ * (e.g. DTCM, ITCM, SQRAM) for
* debugging.
*/
static ssize_t
-mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
- size_t count, loff_t *ppos)
+mwifiex_device_dump_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
{
struct mwifiex_private *priv = file->private_data;
- if (!priv->adapter->if_ops.fw_dump)
+ if (!priv->adapter->if_ops.device_dump)
return -EIO;
- priv->adapter->if_ops.fw_dump(priv->adapter);
+ priv->adapter->if_ops.device_dump(priv->adapter);
return 0;
}
@@ -535,6 +535,144 @@ done:
return ret;
}
+/* Proc debug_mask file read handler.
+ * This function is called when the 'debug_mask' file is opened for reading.
+ * This function can be used to read the driver debugging mask value.
+ */
+static ssize_t
+mwifiex_debug_mask_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mwifiex_private *priv =
+ (struct mwifiex_private *)file->private_data;
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+ char *buf = (char *)page;
+ size_t ret = 0;
+ int pos = 0;
+
+ if (!buf)
+ return -ENOMEM;
+
+ pos += snprintf(buf, PAGE_SIZE, "debug mask=0x%08x\n",
+ priv->adapter->debug_mask);
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+
+ free_page(page);
+ return ret;
+}
+
+/* Proc debug_mask file write handler.
+ * This function is called when the 'debug_mask' file is written to.
+ * This function can be used to set the driver debugging mask value.
+ */
+static ssize_t
+mwifiex_debug_mask_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ unsigned long debug_mask;
+ struct mwifiex_private *priv = (void *)file->private_data;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ char *buf = (void *)addr;
+ size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1));
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, ubuf, buf_size)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (kstrtoul(buf, 0, &debug_mask)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ priv->adapter->debug_mask = debug_mask;
+ ret = count;
+done:
+ free_page(addr);
+ return ret;
+}
+
+/* Proc memrw file write handler.
+ * This function is called when the 'memrw' file is opened for writing.
+ * This function can be used to write to a memory location.
+ */
+static ssize_t
+mwifiex_memrw_write(struct file *file, const char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ int ret;
+ char cmd;
+ struct mwifiex_ds_mem_rw mem_rw;
+ u16 cmd_action;
+ struct mwifiex_private *priv = (void *)file->private_data;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ char *buf = (void *)addr;
+ size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1));
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, ubuf, buf_size)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ ret = sscanf(buf, "%c %x %x", &cmd, &mem_rw.addr, &mem_rw.value);
+ if (ret != 3) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if ((cmd == 'r') || (cmd == 'R')) {
+ cmd_action = HostCmd_ACT_GEN_GET;
+ mem_rw.value = 0;
+ } else if ((cmd == 'w') || (cmd == 'W')) {
+ cmd_action = HostCmd_ACT_GEN_SET;
+ } else {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ memcpy(&priv->mem_rw, &mem_rw, sizeof(mem_rw));
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_MEM_ACCESS, cmd_action, 0,
+ &mem_rw, true))
+ ret = -1;
+ else
+ ret = count;
+
+done:
+ free_page(addr);
+ return ret;
+}
+
+/* Proc memrw file read handler.
+ * This function is called when the 'memrw' file is opened for reading.
+ * This function can be used to read from a memory location.
+ */
+static ssize_t
+mwifiex_memrw_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mwifiex_private *priv = (void *)file->private_data;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ char *buf = (char *)addr;
+ int ret, pos = 0;
+
+ if (!buf)
+ return -ENOMEM;
+
+ pos += snprintf(buf, PAGE_SIZE, "0x%x 0x%x\n", priv->mem_rw.addr,
+ priv->mem_rw.value);
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+
+ free_page(addr);
+ return ret;
+}
+
static u32 saved_offset = -1, saved_bytes = -1;
/*
@@ -654,7 +792,8 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf,
memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
if (arg_num > 3) {
- dev_err(priv->adapter->dev, "Too many arguments\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Too many arguments\n");
ret = -EINVAL;
goto done;
}
@@ -746,11 +885,13 @@ static const struct file_operations mwifiex_dfs_##name##_fops = { \
MWIFIEX_DFS_FILE_READ_OPS(info);
MWIFIEX_DFS_FILE_READ_OPS(debug);
MWIFIEX_DFS_FILE_READ_OPS(getlog);
-MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
+MWIFIEX_DFS_FILE_READ_OPS(device_dump);
MWIFIEX_DFS_FILE_OPS(regrdwr);
MWIFIEX_DFS_FILE_OPS(rdeeprom);
+MWIFIEX_DFS_FILE_OPS(memrw);
MWIFIEX_DFS_FILE_OPS(hscfg);
MWIFIEX_DFS_FILE_OPS(histogram);
+MWIFIEX_DFS_FILE_OPS(debug_mask);
/*
* This function creates the debug FS directory structure and the files.
@@ -772,9 +913,11 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
MWIFIEX_DFS_ADD_FILE(getlog);
MWIFIEX_DFS_ADD_FILE(regrdwr);
MWIFIEX_DFS_ADD_FILE(rdeeprom);
- MWIFIEX_DFS_ADD_FILE(fw_dump);
+ MWIFIEX_DFS_ADD_FILE(device_dump);
+ MWIFIEX_DFS_ADD_FILE(memrw);
MWIFIEX_DFS_ADD_FILE(hscfg);
MWIFIEX_DFS_ADD_FILE(histogram);
+ MWIFIEX_DFS_ADD_FILE(debug_mask);
}
/*
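Taken together, the new 'debug_mask' and 'memrw' files give userspace a knob for the logging and memory-access paths above. A hedged userspace sketch follows; the debugfs mount point and interface name (mlan0) are assumptions, and the 'memrw' input follows the sscanf() format "%c %x %x" used by mwifiex_memrw_write():

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/* Enable all debug levels; the mask value is illustrative. */
	fd = open("/sys/kernel/debug/mwifiex/mlan0/debug_mask", O_WRONLY);
	if (fd >= 0) {
		write(fd, "0xffffffff", 10);
		close(fd);
	}

	/* Queue a firmware memory read; the address is illustrative. */
	fd = open("/sys/kernel/debug/mwifiex/mlan0/memrw", O_WRONLY);
	if (fd < 0)
		return 1;
	write(fd, "r 0x80000000 0", 14);
	close(fd);

	/* Read back "0x<addr> 0x<value>" produced by mwifiex_memrw_read(). */
	fd = open("/sys/kernel/debug/mwifiex/mlan0/memrw", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("memrw: %s", buf);
	}
	close(fd);
	return 0;
}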
diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c
index 65d8d6d4b6ba..58400c69ab26 100644
--- a/drivers/net/wireless/mwifiex/ethtool.c
+++ b/drivers/net/wireless/mwifiex/ethtool.c
@@ -64,104 +64,7 @@ static int mwifiex_ethtool_set_wol(struct net_device *dev,
return 0;
}
-static int
-mwifiex_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
-{
- struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- struct mwifiex_adapter *adapter = priv->adapter;
- struct memory_type_mapping *entry;
-
- if (!adapter->if_ops.fw_dump)
- return -ENOTSUPP;
-
- dump->flag = adapter->curr_mem_idx;
- dump->version = 1;
- if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
- dump->len = adapter->drv_info_size;
- } else if (adapter->curr_mem_idx != MWIFIEX_FW_DUMP_IDX) {
- entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
- dump->len = entry->mem_size;
- } else {
- dump->len = 0;
- }
-
- return 0;
-}
-
-static int
-mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
- void *buffer)
-{
- u8 *p = buffer;
- struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- struct mwifiex_adapter *adapter = priv->adapter;
- struct memory_type_mapping *entry;
-
- if (!adapter->if_ops.fw_dump)
- return -ENOTSUPP;
-
- if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
- if (!adapter->drv_info_dump)
- return -EFAULT;
- memcpy(p, adapter->drv_info_dump, adapter->drv_info_size);
- return 0;
- }
-
- if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
- dev_err(adapter->dev, "firmware dump in progress!!\n");
- return -EBUSY;
- }
-
- entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
-
- if (!entry->mem_ptr)
- return -EFAULT;
-
- memcpy(p, entry->mem_ptr, entry->mem_size);
-
- entry->mem_size = 0;
- vfree(entry->mem_ptr);
- entry->mem_ptr = NULL;
-
- return 0;
-}
-
-static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val)
-{
- struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- struct mwifiex_adapter *adapter = priv->adapter;
-
- if (!adapter->if_ops.fw_dump)
- return -ENOTSUPP;
-
- if (val->flag == MWIFIEX_DRV_INFO_IDX) {
- adapter->curr_mem_idx = MWIFIEX_DRV_INFO_IDX;
- return 0;
- }
-
- if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
- dev_err(adapter->dev, "firmware dump in progress!!\n");
- return -EBUSY;
- }
-
- if (val->flag == MWIFIEX_FW_DUMP_IDX) {
- adapter->curr_mem_idx = val->flag;
- adapter->if_ops.fw_dump(adapter);
- return 0;
- }
-
- if (val->flag < 0 || val->flag >= adapter->num_mem_types)
- return -EINVAL;
-
- adapter->curr_mem_idx = val->flag;
-
- return 0;
-}
-
const struct ethtool_ops mwifiex_ethtool_ops = {
.get_wol = mwifiex_ethtool_get_wol,
.set_wol = mwifiex_ethtool_set_wol,
- .get_dump_flag = mwifiex_get_dump_flag,
- .get_dump_data = mwifiex_get_dump_data,
- .set_dump = mwifiex_set_dump,
};
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 59d8964dd0dc..c404390cb0fa 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -323,6 +323,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075
#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f
#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083
+#define HostCmd_CMD_MEM_ACCESS 0x0086
#define HostCmd_CMD_CFG_DATA 0x008f
#define HostCmd_CMD_VERSION_EXT 0x0097
#define HostCmd_CMD_MEF_CFG 0x009a
@@ -1576,6 +1577,13 @@ struct mwifiex_ie_types_extcap {
u8 ext_capab[0];
} __packed;
+struct host_cmd_ds_mem_access {
+ __le16 action;
+ __le16 reserved;
+ __le32 addr;
+ __le32 value;
+};
+
struct mwifiex_ie_types_qos_info {
struct mwifiex_ie_types_header header;
u8 qos_info;
@@ -1958,6 +1966,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_p2p_mode_cfg mode_cfg;
struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
struct host_cmd_ds_mef_cfg mef_cfg;
+ struct host_cmd_ds_mem_access mem;
struct host_cmd_ds_mac_reg_access mac_reg;
struct host_cmd_ds_bbp_reg_access bbp_reg;
struct host_cmd_ds_rf_reg_access rf_reg;
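fw.h now carries both the HostCmd_CMD_MEM_ACCESS opcode and its host_cmd_ds_mem_access payload. Below is a sketch of the matching command-prep step, assuming the usual mwifiex convention of a per-opcode prep helper; the function name and the params-union access path are assumptions, while the field layout and the (cmd_action, mwifiex_ds_mem_rw) inputs follow the debugfs memrw path above:

/* Hedged sketch, not necessarily the patch's actual prep routine. */
static int mwifiex_cmd_mem_access(struct host_cmd_ds_command *cmd,
				  u16 cmd_action, void *data_buf)
{
	struct mwifiex_ds_mem_rw *mem_rw = data_buf;
	struct host_cmd_ds_mem_access *mem_access = &cmd->params.mem;

	cmd->command = cpu_to_le16(HostCmd_CMD_MEM_ACCESS);
	cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mem_access) +
				S_DS_GEN);

	/* GET reads addr into value; SET writes value to addr. */
	mem_access->action = cpu_to_le16(cmd_action);
	mem_access->reserved = 0;
	mem_access->addr = cpu_to_le32(mem_rw->addr);
	mem_access->value = cpu_to_le32(mem_rw->value);

	return 0;
}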
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index e12192f5cfad..df7fdc09d38c 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -56,7 +56,7 @@ static void wakeup_timer_fn(unsigned long data)
{
struct mwifiex_adapter *adapter = (struct mwifiex_adapter *)data;
- dev_err(adapter->dev, "Firmware wakeup failed\n");
+ mwifiex_dbg(adapter, ERROR, "Firmware wakeup failed\n");
adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
mwifiex_cancel_all_pending_cmd(adapter);
@@ -172,8 +172,9 @@ static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter)
/* Allocate command buffer */
ret = mwifiex_alloc_cmd_buffer(adapter);
if (ret) {
- dev_err(adapter->dev, "%s: failed to alloc cmd buffer\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to alloc cmd buffer\n",
+ __func__);
return -1;
}
@@ -182,8 +183,9 @@ static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter)
+ INTF_HEADER_LEN);
if (!adapter->sleep_cfm) {
- dev_err(adapter->dev, "%s: failed to alloc sleep cfm"
- " cmd buffer\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to alloc sleep cfm\t"
+ " cmd buffer\n", __func__);
return -1;
}
skb_reserve(adapter->sleep_cfm, INTF_HEADER_LEN);
@@ -417,7 +419,7 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
mwifiex_free_lock_list(adapter);
/* Free command buffer */
- dev_dbg(adapter->dev, "info: free cmd buffer\n");
+ mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n");
mwifiex_free_cmd_buffer(adapter);
for (idx = 0; idx < adapter->num_mem_types; idx++) {
@@ -433,6 +435,7 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
if (adapter->drv_info_dump) {
vfree(adapter->drv_info_dump);
+ adapter->drv_info_dump = NULL;
adapter->drv_info_size = 0;
}
@@ -595,10 +598,11 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
for (i = 0; i < adapter->priv_num; ++i) {
head = &adapter->bss_prio_tbl[i].bss_prio_head;
lock = &adapter->bss_prio_tbl[i].bss_prio_lock;
- dev_dbg(adapter->dev, "info: delete BSS priority table,"
- " bss_type = %d, bss_num = %d, i = %d,"
- " head = %p\n",
- priv->bss_type, priv->bss_num, i, head);
+ mwifiex_dbg(adapter, INFO,
+ "info: delete BSS priority table,\t"
+ "bss_type = %d, bss_num = %d, i = %d,\t"
+ "head = %p\n",
+ priv->bss_type, priv->bss_num, i, head);
{
spin_lock_irqsave(lock, flags);
@@ -609,9 +613,10 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
list_for_each_entry_safe(bssprio_node, tmp_node, head,
list) {
if (bssprio_node->priv == priv) {
- dev_dbg(adapter->dev, "info: Delete "
- "node %p, next = %p\n",
- bssprio_node, tmp_node);
+ mwifiex_dbg(adapter, INFO,
+ "info: Delete\t"
+ "node %p, next = %p\n",
+ bssprio_node, tmp_node);
list_del(&bssprio_node->list);
kfree(bssprio_node);
}
@@ -659,20 +664,23 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING;
/* wait for mwifiex_process to complete */
if (adapter->mwifiex_processing) {
- dev_warn(adapter->dev, "main process is still running\n");
+ mwifiex_dbg(adapter, WARN,
+ "main process is still running\n");
return ret;
}
/* cancel current command */
if (adapter->curr_cmd) {
- dev_warn(adapter->dev, "curr_cmd is still in processing\n");
+ mwifiex_dbg(adapter, WARN,
+ "curr_cmd is still in processing\n");
del_timer_sync(&adapter->cmd_timer);
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
adapter->curr_cmd = NULL;
}
/* shut down mwifiex */
- dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
+ mwifiex_dbg(adapter, MSG,
+ "info: shutdown mwifiex...\n");
/* Clean up Tx/Rx queues and delete BSS priority table */
for (i = 0; i < adapter->priv_num; i++) {
@@ -741,8 +749,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
/* check if firmware is already running */
ret = adapter->if_ops.check_fw_status(adapter, poll_num);
if (!ret) {
- dev_notice(adapter->dev,
- "WLAN FW already running! Skip FW dnld\n");
+ mwifiex_dbg(adapter, MSG,
+ "WLAN FW already running! Skip FW dnld\n");
return 0;
}
@@ -750,8 +758,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
/* check if we are the winner for downloading FW */
if (!adapter->winner) {
- dev_notice(adapter->dev,
- "FW already running! Skip FW dnld\n");
+ mwifiex_dbg(adapter, MSG,
+ "FW already running! Skip FW dnld\n");
goto poll_fw;
}
}
@@ -760,7 +768,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
/* Download firmware with helper */
ret = adapter->if_ops.prog_fw(adapter, pmfw);
if (ret) {
- dev_err(adapter->dev, "prog_fw failed ret=%#x\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "prog_fw failed ret=%#x\n", ret);
return ret;
}
}
@@ -769,7 +778,8 @@ poll_fw:
/* Check if the firmware is downloaded successfully or not */
ret = adapter->if_ops.check_fw_status(adapter, poll_num);
if (ret)
- dev_err(adapter->dev, "FW failed to be active in time\n");
+ mwifiex_dbg(adapter, ERROR,
+ "FW failed to be active in time\n");
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index d2b05c3a96da..6f11a25a6b49 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -189,6 +189,7 @@ struct tdls_peer_info {
};
struct mwifiex_debug_info {
+ unsigned int debug_mask;
u32 int_counter;
u32 packets_out[MAX_NUM_TID];
u32 tx_buf_size;
@@ -342,6 +343,11 @@ struct mwifiex_ds_read_eeprom {
u8 value[MAX_EEPROM_DATA];
};
+struct mwifiex_ds_mem_rw {
+ u32 addr;
+ u32 value;
+};
+
#define IEEE_MAX_IE_SIZE 256
#define MWIFIEX_IE_HDR_SIZE (sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE)
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 411a6c2f4aca..cce8e39aa45e 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -53,9 +53,9 @@ mwifiex_cmd_append_generic_ie(struct mwifiex_private *priv, u8 **buffer)
* parameter buffer pointer.
*/
if (priv->gen_ie_buf_len) {
- dev_dbg(priv->adapter->dev,
- "info: %s: append generic ie len %d to %p\n",
- __func__, priv->gen_ie_buf_len, *buffer);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: %s: append generic ie len %d to %p\n",
+ __func__, priv->gen_ie_buf_len, *buffer);
/* Wrap the generic IE buffer with a pass through TLV type */
ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
@@ -125,9 +125,9 @@ mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer,
tsf_val = cpu_to_le64(bss_desc->timestamp);
- dev_dbg(priv->adapter->dev,
- "info: %s: TSF offset calc: %016llx - %016llx\n",
- __func__, bss_desc->timestamp, bss_desc->fw_tsf);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: %s: TSF offset calc: %016llx - %016llx\n",
+ __func__, bss_desc->timestamp, bss_desc->fw_tsf);
memcpy(*buffer, &tsf_val, sizeof(tsf_val));
*buffer += sizeof(tsf_val);
@@ -152,7 +152,7 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
tmp = kmemdup(rate1, rate1_size, GFP_KERNEL);
if (!tmp) {
- dev_err(priv->adapter->dev, "failed to alloc tmp buf\n");
+ mwifiex_dbg(priv->adapter, ERROR, "failed to alloc tmp buf\n");
return -ENOMEM;
}
@@ -169,8 +169,8 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
}
}
- dev_dbg(priv->adapter->dev, "info: Tx data rate set to %#x\n",
- priv->data_rate);
+ mwifiex_dbg(priv->adapter, INFO, "info: Tx data rate set to %#x\n",
+ priv->data_rate);
if (!priv->is_data_rate_auto) {
while (*ptr) {
@@ -180,9 +180,10 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
}
ptr++;
}
- dev_err(priv->adapter->dev, "previously set fixed data rate %#x"
- " is not compatible with the network\n",
- priv->data_rate);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "previously set fixed data rate %#x\t"
+ "is not compatible with the network\n",
+ priv->data_rate);
ret = -1;
goto done;
@@ -214,8 +215,9 @@ mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv,
if (mwifiex_get_common_rates(priv, out_rates, MWIFIEX_SUPPORTED_RATES,
card_rates, card_rates_size)) {
*out_rates_size = 0;
- dev_err(priv->adapter->dev, "%s: cannot get common rates\n",
- __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: cannot get common rates\n",
+ __func__);
return -1;
}
@@ -246,8 +248,9 @@ mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer)
* parameter buffer pointer.
*/
if (priv->wps_ie_len) {
- dev_dbg(priv->adapter->dev, "cmd: append wps ie %d to %p\n",
- priv->wps_ie_len, *buffer);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: append wps ie %d to %p\n",
+ priv->wps_ie_len, *buffer);
/* Wrap the generic IE buffer with a pass through TLV type */
ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE);
@@ -292,8 +295,9 @@ mwifiex_cmd_append_wapi_ie(struct mwifiex_private *priv, u8 **buffer)
* parameter buffer pointer.
*/
if (priv->wapi_ie_len) {
- dev_dbg(priv->adapter->dev, "cmd: append wapi ie %d to %p\n",
- priv->wapi_ie_len, *buffer);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: append wapi ie %d to %p\n",
+ priv->wapi_ie_len, *buffer);
/* Wrap the generic IE buffer with a pass through TLV type */
ie_header.type = cpu_to_le16(TLV_TYPE_WAPI_IE);
@@ -453,8 +457,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
rates_tlv->header.len = cpu_to_le16((u16) rates_size);
memcpy(rates_tlv->rates, rates, rates_size);
pos += sizeof(rates_tlv->header) + rates_size;
- dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: rates size = %d\n",
- rates_size);
+ mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_CMD: rates size = %d\n",
+ rates_size);
/* Add the Authentication type to be used for Auth frames */
auth_tlv = (struct mwifiex_ie_types_auth_type *) pos;
@@ -487,14 +491,14 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_scan_param_set));
chan_tlv->chan_scan_param[0].chan_number =
(bss_desc->phy_param_set.ds_param_set.current_chan);
- dev_dbg(priv->adapter->dev, "info: Assoc: TLV Chan = %d\n",
- chan_tlv->chan_scan_param[0].chan_number);
+ mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Chan = %d\n",
+ chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type =
mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
- dev_dbg(priv->adapter->dev, "info: Assoc: TLV Band = %d\n",
- chan_tlv->chan_scan_param[0].radio_type);
+ mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Band = %d\n",
+ chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
}
@@ -544,8 +548,9 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
tmp_cap &= CAPINFO_MASK;
- dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
- tmp_cap, CAPINFO_MASK);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
+ tmp_cap, CAPINFO_MASK);
assoc->cap_info_bitmap = cpu_to_le16(tmp_cap);
return 0;
@@ -621,23 +626,35 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
struct ieee_types_assoc_rsp *assoc_rsp;
struct mwifiex_bssdescriptor *bss_desc;
bool enable_data = true;
- u16 cap_info, status_code;
+ u16 cap_info, status_code, aid;
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap);
status_code = le16_to_cpu(assoc_rsp->status_code);
+ aid = le16_to_cpu(assoc_rsp->a_id);
+
+ if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
+ dev_err(priv->adapter->dev,
+ "invalid AID value 0x%x; bits 15:14 not set\n",
+ aid);
+
+ aid &= ~(BIT(15) | BIT(14));
priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
sizeof(priv->assoc_rsp_buf));
memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
+ assoc_rsp->a_id = cpu_to_le16(aid);
+
if (status_code) {
priv->adapter->dbg.num_cmd_assoc_failure++;
- dev_err(priv->adapter->dev,
- "ASSOC_RESP: failed, status code=%d err=%#x a_id=%#x\n",
- status_code, cap_info, le16_to_cpu(assoc_rsp->a_id));
+ mwifiex_dbg(priv->adapter, ERROR,
+ "ASSOC_RESP: failed,\t"
+ "status code=%d err=%#x a_id=%#x\n",
+ status_code, cap_info,
+ le16_to_cpu(assoc_rsp->a_id));
if (cap_info == MWIFIEX_TIMEOUT_FOR_AP_RESP) {
if (status_code == MWIFIEX_STATUS_CODE_AUTH_TIMEOUT)
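[Worked example for the AID handling above, with illustrative values: IEEE
802.11 defines the two most significant bits of the association ID as set, so
a well-formed a_id of 0xc005 passes the check and is stored as 0x0005, while
a malformed a_id of 0x0005 first triggers the "invalid AID" warning and is
then stored unchanged as 0x0005 after masking.]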
@@ -661,8 +678,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
/* Set the attempted BSSID Index to current */
bss_desc = priv->attempted_bss_desc;
- dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: %s\n",
- bss_desc->ssid.ssid);
+ mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: %s\n",
+ bss_desc->ssid.ssid);
/* Make a copy of current BSSID descriptor */
memcpy(&priv->curr_bss_params.bss_descriptor,
@@ -692,8 +709,9 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
= ((bss_desc->wmm_ie.qos_info_bitmap &
IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0);
- dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
- priv->curr_pkt_filter);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
+ priv->curr_pkt_filter);
if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
priv->wpa_is_gtk_set = false;
@@ -709,8 +727,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
}
if (enable_data)
- dev_dbg(priv->adapter->dev,
- "info: post association, re-enabling data flow\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: post association, re-enabling data flow\n");
/* Reset SNR/NF/RSSI values */
priv->data_rssi_last = 0;
@@ -728,7 +746,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
priv->adapter->dbg.num_cmd_assoc_success++;
- dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: associated\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: associated\n");
/* Add the ra_list here for infra mode as there will be only 1 ra
always */
@@ -815,8 +833,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: SSID = %s\n",
- adhoc_start->ssid);
+ mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n",
+ adhoc_start->ssid);
memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN);
memcpy(bss_desc->ssid.ssid, req_ssid->ssid, req_ssid->ssid_len);
@@ -848,12 +866,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
}
if (!priv->adhoc_channel) {
- dev_err(adapter->dev, "ADHOC_S_CMD: adhoc_channel cannot be 0\n");
+ mwifiex_dbg(adapter, ERROR,
+ "ADHOC_S_CMD: adhoc_channel cannot be 0\n");
return -1;
}
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
- priv->adhoc_channel);
+ mwifiex_dbg(adapter, INFO,
+ "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
+ priv->adhoc_channel);
priv->curr_bss_params.bss_descriptor.channel = priv->adhoc_channel;
priv->curr_bss_params.band = adapter->adhoc_start_band;
@@ -885,13 +905,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
/* Set up privacy in bss_desc */
if (priv->sec_info.encryption_mode) {
/* Ad-Hoc capability privacy on */
- dev_dbg(adapter->dev,
- "info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
tmp_cap |= WLAN_CAPABILITY_PRIVACY;
} else {
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: wep_status NOT set,"
- " setting privacy to ACCEPT ALL\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: ADHOC_S_CMD: wep_status NOT set,\t"
+ "setting privacy to ACCEPT ALL\n");
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
}
@@ -902,8 +923,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET, 0,
&priv->curr_pkt_filter, false)) {
- dev_err(adapter->dev,
- "ADHOC_S_CMD: G Protection config failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "ADHOC_S_CMD: G Protection config failed\n");
return -1;
}
}
@@ -918,10 +939,10 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
memcpy(&priv->curr_bss_params.data_rates,
&adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%4ph\n",
- adhoc_start->data_rate);
+ mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: rates=%4ph\n",
+ adhoc_start->data_rate);
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
+ mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
if (IS_SUPPORT_MULTI_BANDS(adapter)) {
/* Append a channel TLV */
@@ -935,8 +956,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
chan_tlv->chan_scan_param[0].chan_number =
(u8) priv->curr_bss_params.bss_descriptor.channel;
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Chan = %d\n",
- chan_tlv->chan_scan_param[0].chan_number);
+ mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Chan = %d\n",
+ chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type
= mwifiex_band_to_radio_type(priv->curr_bss_params.band);
@@ -951,8 +972,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
chan_tlv->chan_scan_param[0].radio_type |=
(IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4);
}
- dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
- chan_tlv->chan_scan_param[0].radio_type);
+ mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Band = %d\n",
+ chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
cmd_append_size +=
@@ -1074,8 +1095,8 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET, 0,
&curr_pkt_filter, false)) {
- dev_err(priv->adapter->dev,
- "ADHOC_J_CMD: G Protection config failed\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "ADHOC_J_CMD: G Protection config failed\n");
return -1;
}
}
@@ -1106,14 +1127,15 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
tmp_cap &= CAPINFO_MASK;
- dev_dbg(priv->adapter->dev,
- "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
- tmp_cap, CAPINFO_MASK);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
+ tmp_cap, CAPINFO_MASK);
/* Information on BSSID descriptor passed to FW */
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n",
- adhoc_join->bss_descriptor.bssid,
- adhoc_join->bss_descriptor.ssid);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n",
+ adhoc_join->bss_descriptor.bssid,
+ adhoc_join->bss_descriptor.ssid);
for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
bss_desc->supported_rates[i]; i++)
@@ -1149,14 +1171,14 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_scan_param_set));
chan_tlv->chan_scan_param[0].chan_number =
(bss_desc->phy_param_set.ds_param_set.current_chan);
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Chan=%d\n",
- chan_tlv->chan_scan_param[0].chan_number);
+ mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Chan=%d\n",
+ chan_tlv->chan_scan_param[0].chan_number);
chan_tlv->chan_scan_param[0].radio_type =
mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Band=%d\n",
- chan_tlv->chan_scan_param[0].radio_type);
+ mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Band=%d\n",
+ chan_tlv->chan_scan_param[0].radio_type);
pos += sizeof(chan_tlv->header) +
sizeof(struct mwifiex_chan_scan_param_set);
cmd_append_size += sizeof(chan_tlv->header) +
@@ -1210,7 +1232,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
/* Join result code 0 --> SUCCESS */
reason_code = le16_to_cpu(resp->result);
if (reason_code) {
- dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n");
+ mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n");
if (priv->media_connected)
mwifiex_reset_connect_state(priv, reason_code);
@@ -1225,8 +1247,8 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
priv->media_connected = true;
if (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_AD_HOC_START) {
- dev_dbg(priv->adapter->dev, "info: ADHOC_S_RESP %s\n",
- bss_desc->ssid.ssid);
+ mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_S_RESP %s\n",
+ bss_desc->ssid.ssid);
/* Update the created network descriptor with the new BSSID */
memcpy(bss_desc->mac_address,
@@ -1238,8 +1260,9 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
* Now the join cmd should be successful.
* If BSSID has changed use SSID to compare instead of BSSID
*/
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_RESP %s\n",
- bss_desc->ssid.ssid);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ADHOC_J_RESP %s\n",
+ bss_desc->ssid.ssid);
/*
* Make a copy of current BSSID descriptor, only needed for
@@ -1252,10 +1275,10 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
priv->adhoc_state = ADHOC_JOINED;
}
- dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: channel = %d\n",
- priv->adhoc_channel);
- dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: BSSID = %pM\n",
- priv->curr_bss_params.bss_descriptor.mac_address);
+ mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: channel = %d\n",
+ priv->adhoc_channel);
+ mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: BSSID = %pM\n",
+ priv->curr_bss_params.bss_descriptor.mac_address);
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
@@ -1317,12 +1340,12 @@ int
mwifiex_adhoc_start(struct mwifiex_private *priv,
struct cfg80211_ssid *adhoc_ssid)
{
- dev_dbg(priv->adapter->dev, "info: Adhoc Channel = %d\n",
- priv->adhoc_channel);
- dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
- priv->curr_bss_params.bss_descriptor.channel);
- dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n",
- priv->curr_bss_params.band);
+ mwifiex_dbg(priv->adapter, INFO, "info: Adhoc Channel = %d\n",
+ priv->adhoc_channel);
+ mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.channel = %d\n",
+ priv->curr_bss_params.bss_descriptor.channel);
+ mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.band = %d\n",
+ priv->curr_bss_params.band);
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
priv->adapter->config_bands & BAND_AAC)
@@ -1343,14 +1366,16 @@ mwifiex_adhoc_start(struct mwifiex_private *priv,
int mwifiex_adhoc_join(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
- dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid =%s\n",
- priv->curr_bss_params.bss_descriptor.ssid.ssid);
- dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid_len =%u\n",
- priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
- dev_dbg(priv->adapter->dev, "info: adhoc join: ssid =%s\n",
- bss_desc->ssid.ssid);
- dev_dbg(priv->adapter->dev, "info: adhoc join: ssid_len =%u\n",
- bss_desc->ssid.ssid_len);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: adhoc join: curr_bss ssid =%s\n",
+ priv->curr_bss_params.bss_descriptor.ssid.ssid);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: adhoc join: curr_bss ssid_len =%u\n",
+ priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
+ mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid =%s\n",
+ bss_desc->ssid.ssid);
+ mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid_len =%u\n",
+ bss_desc->ssid.ssid_len);
/* Check if the requested SSID is already joined */
if (priv->curr_bss_params.bss_descriptor.ssid.ssid_len &&
@@ -1358,8 +1383,9 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
&priv->curr_bss_params.bss_descriptor.ssid) &&
(priv->curr_bss_params.bss_descriptor.bss_mode ==
NL80211_IFTYPE_ADHOC)) {
- dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: new ad-hoc SSID"
- " is the same as current; not attempting to re-join\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ADHOC_J_CMD: new ad-hoc SSID\t"
+ "is the same as current; not attempting to re-join\n");
return -1;
}
@@ -1370,10 +1396,12 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
else
mwifiex_set_ba_params(priv);
- dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
- priv->curr_bss_params.bss_descriptor.channel);
- dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n",
- priv->curr_bss_params.band);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: curr_bss_params.channel = %d\n",
+ priv->curr_bss_params.bss_descriptor.channel);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: curr_bss_params.band = %c\n",
+ priv->curr_bss_params.band);
return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
HostCmd_ACT_GEN_SET, 0, bss_desc, true);
@@ -1421,7 +1449,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
ret = mwifiex_deauthenticate_infra(priv, mac);
if (ret)
cfg80211_disconnected(priv->netdev, 0, NULL, 0,
- GFP_KERNEL);
+ true, GFP_KERNEL);
break;
case NL80211_IFTYPE_ADHOC:
return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP,
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 03a95c7d34bf..3ba4e0e04223 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -24,6 +24,10 @@
#define VERSION "1.0"
+static unsigned int debug_mask = MWIFIEX_DEFAULT_DEBUG_MASK;
+module_param(debug_mask, uint, 0);
+MODULE_PARM_DESC(debug_mask, "bitmap for debug flags");
+
const char driver_version[] = "mwifiex " VERSION " (%s) ";
static char *cal_data_cfg;
module_param(cal_data_cfg, charp, 0);
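[Usage note, not part of the patch: debug_mask is registered with permissions
0, so it can only be set at module load time and is not exposed under
/sys/module/mwifiex/parameters. For example, loading with
"modprobe mwifiex debug_mask=0x1f" would add DATA and CMD tracing on top of
the default MSG|FATAL|ERROR mask; the bit values come from the MWIFIEX_DBG_*
enum added to main.h later in this patch.]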
@@ -63,6 +67,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
/* Save interface specific operations in adapter */
memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops));
+ adapter->debug_mask = debug_mask;
/* card specific initialization has been deferred until now .. */
if (adapter->if_ops.init_if)
@@ -89,7 +94,8 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
return 0;
error:
- dev_dbg(adapter->dev, "info: leave mwifiex_register with error\n");
+ mwifiex_dbg(adapter, ERROR,
+ "info: leave mwifiex_register with error\n");
for (i = 0; i < adapter->priv_num; i++)
kfree(adapter->priv[i]);
@@ -231,11 +237,10 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
goto exit_main_proc;
} else {
adapter->mwifiex_processing = true;
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
}
process_start:
do {
- adapter->more_task_flag = false;
- spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
(adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
break;
@@ -275,7 +280,6 @@ process_start:
adapter->pm_wakeup_fw_try = true;
mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
adapter->if_ops.wakeup(adapter);
- spin_lock_irqsave(&adapter->main_proc_lock, flags);
continue;
}
@@ -335,7 +339,6 @@ process_start:
(adapter->ps_state == PS_STATE_PRE_SLEEP) ||
(adapter->ps_state == PS_STATE_SLEEP_CFM) ||
adapter->tx_lock_flag){
- spin_lock_irqsave(&adapter->main_proc_lock, flags);
continue;
}
@@ -386,12 +389,14 @@ process_start:
}
break;
}
- spin_lock_irqsave(&adapter->main_proc_lock, flags);
} while (true);
spin_lock_irqsave(&adapter->main_proc_lock, flags);
- if (adapter->more_task_flag)
+ if (adapter->more_task_flag) {
+ adapter->more_task_flag = false;
+ spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
goto process_start;
+ }
adapter->mwifiex_processing = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
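[A condensed sketch of the reworked mwifiex_main_process() locking, with the
actual work items elided; this paraphrases the patched code and is not part
of the patch. The lock is now taken once around the state flags instead of
being re-acquired on every loop iteration:

    spin_lock_irqsave(&adapter->main_proc_lock, flags);
    adapter->mwifiex_processing = true;
    spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
process_start:
    do {
        /* wakeup handling and cmd/event/TX processing, all
         * running with main_proc_lock dropped
         */
    } while (true);
    spin_lock_irqsave(&adapter->main_proc_lock, flags);
    if (adapter->more_task_flag) {
        adapter->more_task_flag = false;
        spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
        goto process_start;
    }
    adapter->mwifiex_processing = false;
    spin_unlock_irqrestore(&adapter->main_proc_lock, flags);]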
@@ -455,8 +460,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
struct wireless_dev *wdev;
if (!firmware) {
- dev_err(adapter->dev,
- "Failed to get firmware %s\n", adapter->fw_name);
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to get firmware %s\n", adapter->fw_name);
goto err_dnld_fw;
}
@@ -472,13 +477,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
if (ret == -1)
goto err_dnld_fw;
- dev_notice(adapter->dev, "WLAN FW is active\n");
+ mwifiex_dbg(adapter, MSG, "WLAN FW is active\n");
if (cal_data_cfg) {
if ((request_firmware(&adapter->cal_data, cal_data_cfg,
adapter->dev)) < 0)
- dev_err(adapter->dev,
- "Cal data request_firmware() failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Cal data request_firmware() failed\n");
}
/* enable host interrupt after fw dnld is successful */
@@ -503,12 +508,14 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
if (mwifiex_register_cfg80211(adapter)) {
- dev_err(adapter->dev, "cannot register with cfg80211\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot register with cfg80211\n");
goto err_init_fw;
}
if (mwifiex_init_channel_scan_gap(adapter)) {
- dev_err(adapter->dev, "could not init channel stats table\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not init channel stats table\n");
goto err_init_fw;
}
@@ -522,7 +529,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM,
NL80211_IFTYPE_STATION, NULL, NULL);
if (IS_ERR(wdev)) {
- dev_err(adapter->dev, "cannot create default STA interface\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create default STA interface\n");
rtnl_unlock();
goto err_add_intf;
}
@@ -531,7 +539,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM,
NL80211_IFTYPE_AP, NULL, NULL);
if (IS_ERR(wdev)) {
- dev_err(adapter->dev, "cannot create AP interface\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create AP interface\n");
rtnl_unlock();
goto err_add_intf;
}
@@ -542,8 +551,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
NL80211_IFTYPE_P2P_CLIENT, NULL,
NULL);
if (IS_ERR(wdev)) {
- dev_err(adapter->dev,
- "cannot create p2p client interface\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot create p2p client interface\n");
rtnl_unlock();
goto err_add_intf;
}
@@ -551,7 +560,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
rtnl_unlock();
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
- dev_notice(adapter->dev, "driver_version = %s\n", fmt);
+ mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt);
goto done;
err_add_intf:
@@ -561,7 +570,8 @@ err_init_fw:
if (adapter->if_ops.disable_int)
adapter->if_ops.disable_int(adapter);
err_dnld_fw:
- pr_debug("info: %s: unregister device\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "info: %s: unregister device\n", __func__);
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
@@ -602,8 +612,8 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
adapter->dev, GFP_KERNEL, adapter,
mwifiex_fw_dpc);
if (ret < 0)
- dev_err(adapter->dev,
- "request_firmware_nowait() returned error %d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "request_firmware_nowait error %d\n", ret);
return ret;
}
@@ -629,7 +639,8 @@ mwifiex_close(struct net_device *dev)
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
if (priv->scan_request) {
- dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "aborting scan on ndo_stop\n");
cfg80211_scan_done(priv->scan_request, 1);
priv->scan_request = NULL;
priv->scan_aborting = true;
@@ -650,7 +661,8 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
txq = netdev_get_tx_queue(priv->netdev, index);
if (!netif_tx_queue_stopped(txq)) {
netif_tx_stop_queue(txq);
- dev_dbg(priv->adapter->dev, "stop queue: %d\n", index);
+ mwifiex_dbg(priv->adapter, DATA,
+ "stop queue: %d\n", index);
}
}
@@ -715,8 +727,9 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct mwifiex_txinfo *tx_info;
bool multicast;
- dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
- jiffies, priv->bss_type, priv->bss_num);
+ mwifiex_dbg(priv->adapter, DATA,
+ "data: %lu BSS(%d-%d): Data <= kernel\n",
+ jiffies, priv->bss_type, priv->bss_num);
if (priv->adapter->surprise_removed) {
kfree_skb(skb);
@@ -724,28 +737,31 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
- dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Tx: bad skb len %d\n", skb->len);
kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
}
if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
- dev_dbg(priv->adapter->dev,
- "data: Tx: insufficient skb headroom %d\n",
- skb_headroom(skb));
+ mwifiex_dbg(priv->adapter, DATA,
+ "data: Tx: insufficient skb headroom %d\n",
+ skb_headroom(skb));
/* Insufficient skb headroom - allocate a new skb */
new_skb =
skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
if (unlikely(!new_skb)) {
- dev_err(priv->adapter->dev, "Tx: cannot alloca new_skb\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Tx: cannot alloca new_skb\n");
kfree_skb(skb);
priv->stats.tx_dropped++;
return 0;
}
kfree_skb(skb);
skb = new_skb;
- dev_dbg(priv->adapter->dev, "info: new skb headroomd %d\n",
- skb_headroom(skb));
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: new skb headroomd %d\n",
+ skb_headroom(skb));
}
tx_info = MWIFIEX_SKB_TXCB(skb);
@@ -803,8 +819,8 @@ mwifiex_set_mac_address(struct net_device *dev, void *addr)
if (!ret)
memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN);
else
- dev_err(priv->adapter->dev,
- "set mac address failed: ret=%d\n", ret);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "set mac address failed: ret=%d\n", ret);
memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
@@ -842,20 +858,22 @@ mwifiex_tx_timeout(struct net_device *dev)
priv->num_tx_timeout++;
priv->tx_timeout_cnt++;
- dev_err(priv->adapter->dev,
- "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n",
- jiffies, priv->tx_timeout_cnt, priv->bss_type, priv->bss_num);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n",
+ jiffies, priv->tx_timeout_cnt, priv->bss_type,
+ priv->bss_num);
mwifiex_set_trans_start(dev);
if (priv->tx_timeout_cnt > TX_TIMEOUT_THRESHOLD &&
priv->adapter->if_ops.card_reset) {
- dev_err(priv->adapter->dev,
- "tx_timeout_cnt exceeds threshold. Triggering card reset!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tx_timeout_cnt exceeds threshold.\t"
+ "Triggering card reset!\n");
priv->adapter->if_ops.card_reset(priv->adapter);
}
}
-void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
{
void *p;
char drv_version[64];
@@ -868,10 +886,11 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
if (adapter->drv_info_dump) {
vfree(adapter->drv_info_dump);
+ adapter->drv_info_dump = NULL;
adapter->drv_info_size = 0;
}
- dev_info(adapter->dev, "=== DRIVER INFO DUMP START===\n");
+ mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");
adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
@@ -939,12 +958,12 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
}
if (adapter->iface_type == MWIFIEX_SDIO) {
- p += sprintf(p, "\n=== SDIO register DUMP===\n");
+ p += sprintf(p, "\n=== SDIO register dump===\n");
if (adapter->if_ops.reg_dump)
p += adapter->if_ops.reg_dump(adapter, p);
}
- p += sprintf(p, "\n=== MORE DEBUG INFORMATION\n");
+ p += sprintf(p, "\n=== more debug information\n");
debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
if (debug_info) {
for (i = 0; i < adapter->priv_num; i++) {
@@ -959,9 +978,99 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
}
adapter->drv_info_size = p - adapter->drv_info_dump;
- dev_info(adapter->dev, "=== DRIVER INFO DUMP END===\n");
+ mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
+}
+EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump);
+
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
+{
+ u8 idx, *dump_data, *fw_dump_ptr;
+ u32 dump_len;
+
+ dump_len = (strlen("========Start dump driverinfo========\n") +
+ adapter->drv_info_size +
+ strlen("\n========End dump========\n"));
+
+ for (idx = 0; idx < adapter->num_mem_types; idx++) {
+ struct memory_type_mapping *entry =
+ &adapter->mem_type_mapping_tbl[idx];
+
+ if (entry->mem_ptr) {
+ dump_len += (strlen("========Start dump ") +
+ strlen(entry->mem_name) +
+ strlen("========\n") +
+ (entry->mem_size + 1) +
+ strlen("\n========End dump========\n"));
+ }
+ }
+
+ dump_data = vzalloc(dump_len + 1);
+ if (!dump_data)
+ goto done;
+
+ fw_dump_ptr = dump_data;
+
+ /* Dump all the memory data into a single file; a userspace script
+ * will be used to split it into multiple files
+ */
+ mwifiex_dbg(adapter, MSG,
+ "== mwifiex dump information to /sys/class/devcoredump start");
+
+ strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
+ fw_dump_ptr += strlen("========Start dump driverinfo========\n");
+ memcpy(fw_dump_ptr, adapter->drv_info_dump, adapter->drv_info_size);
+ fw_dump_ptr += adapter->drv_info_size;
+ strcpy(fw_dump_ptr, "\n========End dump========\n");
+ fw_dump_ptr += strlen("\n========End dump========\n");
+
+ for (idx = 0; idx < adapter->num_mem_types; idx++) {
+ struct memory_type_mapping *entry =
+ &adapter->mem_type_mapping_tbl[idx];
+
+ if (entry->mem_ptr) {
+ strcpy(fw_dump_ptr, "========Start dump ");
+ fw_dump_ptr += strlen("========Start dump ");
+
+ strcpy(fw_dump_ptr, entry->mem_name);
+ fw_dump_ptr += strlen(entry->mem_name);
+
+ strcpy(fw_dump_ptr, "========\n");
+ fw_dump_ptr += strlen("========\n");
+
+ memcpy(fw_dump_ptr, entry->mem_ptr, entry->mem_size);
+ fw_dump_ptr += entry->mem_size;
+
+ strcpy(fw_dump_ptr, "\n========End dump========\n");
+ fw_dump_ptr += strlen("\n========End dump========\n");
+ }
+ }
+
+ /* the device dump data will be freed by the device coredump release
+ * function after 5 min
+ */
+ dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL);
+ mwifiex_dbg(adapter, MSG,
+ "== mwifiex dump information to /sys/class/devcoredump end");
+
+done:
+ for (idx = 0; idx < adapter->num_mem_types; idx++) {
+ struct memory_type_mapping *entry =
+ &adapter->mem_type_mapping_tbl[idx];
+
+ if (entry->mem_ptr) {
+ vfree(entry->mem_ptr);
+ entry->mem_ptr = NULL;
+ }
+ entry->mem_size = 0;
+ }
+
+ if (adapter->drv_info_dump) {
+ vfree(adapter->drv_info_dump);
+ adapter->drv_info_dump = NULL;
+ adapter->drv_info_size = 0;
+ }
}
-EXPORT_SYMBOL_GPL(mwifiex_dump_drv_info);
+EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
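[For reference, a sketch of the single devcoredump payload that
mwifiex_upload_device_dump() assembles; the section bodies are raw bytes and
the region names (e.g. "ITCM", "DTCM") depend on the interface's
mem_type_mapping_tbl, so they are illustrative here:

    ========Start dump driverinfo========
    <adapter->drv_info_size bytes of driver state text>
    ========End dump========
    ========Start dump ITCM========
    <entry->mem_size bytes of firmware memory>
    ========End dump========
    ...

A userspace script can split the blob read from /sys/class/devcoredump on
these delimiters, matching the in-code comment above.]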
/*
* CFG802.11 network device handler for statistics retrieval.
@@ -1230,21 +1339,24 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
}
}
- dev_dbg(adapter->dev, "cmd: calling mwifiex_shutdown_drv...\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: calling mwifiex_shutdown_drv...\n");
adapter->init_wait_q_woken = false;
if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
wait_event_interruptible(adapter->init_wait_q,
adapter->init_wait_q_woken);
- dev_dbg(adapter->dev, "cmd: mwifiex_shutdown_drv done\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: mwifiex_shutdown_drv done\n");
if (atomic_read(&adapter->rx_pending) ||
atomic_read(&adapter->tx_pending) ||
atomic_read(&adapter->cmd_pending)) {
- dev_err(adapter->dev, "rx_pending=%d, tx_pending=%d, "
- "cmd_pending=%d\n",
- atomic_read(&adapter->rx_pending),
- atomic_read(&adapter->tx_pending),
- atomic_read(&adapter->cmd_pending));
+ mwifiex_dbg(adapter, ERROR,
+ "rx_pending=%d, tx_pending=%d,\t"
+ "cmd_pending=%d\n",
+ atomic_read(&adapter->rx_pending),
+ atomic_read(&adapter->tx_pending),
+ atomic_read(&adapter->cmd_pending));
}
for (i = 0; i < adapter->priv_num; i++) {
@@ -1264,11 +1376,13 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
wiphy_free(adapter->wiphy);
/* Unregister device */
- dev_dbg(adapter->dev, "info: unregister device\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: unregister device\n");
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
/* Free adapter structure */
- dev_dbg(adapter->dev, "info: free adapter\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: free adapter\n");
mwifiex_free_adapter(adapter);
exit_remove:
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index fe1256044a6c..5a6c1c76b33b 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -36,6 +36,7 @@
#include <linux/of.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
+#include <linux/devcoredump.h>
#include "decl.h"
#include "ioctl.h"
@@ -147,6 +148,54 @@ enum {
/* Address alignment */
#define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1))
+/**
+ * enum MWIFIEX_DEBUG_LEVEL - Marvell WiFi debug level bitmask
+ */
+enum MWIFIEX_DEBUG_LEVEL {
+ MWIFIEX_DBG_MSG = 0x00000001,
+ MWIFIEX_DBG_FATAL = 0x00000002,
+ MWIFIEX_DBG_ERROR = 0x00000004,
+ MWIFIEX_DBG_DATA = 0x00000008,
+ MWIFIEX_DBG_CMD = 0x00000010,
+ MWIFIEX_DBG_EVENT = 0x00000020,
+ MWIFIEX_DBG_INTR = 0x00000040,
+ MWIFIEX_DBG_IOCTL = 0x00000080,
+
+ MWIFIEX_DBG_MPA_D = 0x00008000,
+ MWIFIEX_DBG_DAT_D = 0x00010000,
+ MWIFIEX_DBG_CMD_D = 0x00020000,
+ MWIFIEX_DBG_EVT_D = 0x00040000,
+ MWIFIEX_DBG_FW_D = 0x00080000,
+ MWIFIEX_DBG_IF_D = 0x00100000,
+
+ MWIFIEX_DBG_ENTRY = 0x10000000,
+ MWIFIEX_DBG_WARN = 0x20000000,
+ MWIFIEX_DBG_INFO = 0x40000000,
+ MWIFIEX_DBG_DUMP = 0x80000000,
+
+ MWIFIEX_DBG_ANY = 0xffffffff
+};
+
+#define MWIFIEX_DEFAULT_DEBUG_MASK (MWIFIEX_DBG_MSG | \
+ MWIFIEX_DBG_FATAL | \
+ MWIFIEX_DBG_ERROR)
+
+#define mwifiex_dbg(adapter, dbg_mask, fmt, args...) \
+do { \
+ if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask) \
+ if ((adapter)->dev) \
+ dev_info((adapter)->dev, fmt, ## args); \
+} while (0)
+
+#define DEBUG_DUMP_DATA_MAX_LEN 128
+#define mwifiex_dbg_dump(adapter, dbg_mask, str, buf, len) \
+do { \
+ if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask) \
+ print_hex_dump(KERN_DEBUG, str, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, len, false); \
+} while (0)
+
struct mwifiex_dbg {
u32 num_cmd_host_to_card_failure;
u32 num_cmd_sleep_cfm_host_to_card_failure;
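[A minimal sketch of the new macros at hypothetical call sites, assuming an
adapter left at MWIFIEX_DEFAULT_DEBUG_MASK (MSG | FATAL | ERROR):

    /* suppressed: MWIFIEX_DBG_INFO is not in the default mask */
    mwifiex_dbg(adapter, INFO, "info: scan delayed\n");

    /* emitted via dev_info() on adapter->dev: ERROR is in the mask */
    mwifiex_dbg(adapter, ERROR, "scan failed: %d\n", ret);

    /* suppressed by default; needs MWIFIEX_DBG_DAT_D (0x10000) set */
    mwifiex_dbg_dump(adapter, DAT_D, "rx: ", skb->data,
                     min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));

Note that every enabled level, ERROR included, goes through dev_info(), so
severity lives in the mask bits rather than in the printk log level.]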
@@ -451,7 +500,7 @@ enum rdwr_status {
};
enum mwifiex_iface_work_flags {
- MWIFIEX_IFACE_WORK_FW_DUMP,
+ MWIFIEX_IFACE_WORK_DEVICE_DUMP,
MWIFIEX_IFACE_WORK_CARD_RESET,
};
@@ -611,6 +660,7 @@ struct mwifiex_private {
struct delayed_work dfs_chan_sw_work;
struct cfg80211_beacon_data beacon_after;
struct mwifiex_11h_intf_state state_11h;
+ struct mwifiex_ds_mem_rw mem_rw;
};
@@ -740,8 +790,8 @@ struct mwifiex_if_ops {
int (*init_fw_port) (struct mwifiex_adapter *);
int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
void (*card_reset) (struct mwifiex_adapter *);
- void (*fw_dump)(struct mwifiex_adapter *);
int (*reg_dump)(struct mwifiex_adapter *, char *);
+ void (*device_dump)(struct mwifiex_adapter *);
int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
void (*iface_work)(struct work_struct *work);
void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
@@ -750,6 +800,7 @@ struct mwifiex_if_ops {
struct mwifiex_adapter {
u8 iface_type;
+ unsigned int debug_mask;
struct mwifiex_iface_comb iface_limit;
struct mwifiex_iface_comb curr_iface_comb;
struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
@@ -900,7 +951,6 @@ struct mwifiex_adapter {
u8 key_api_major_ver, key_api_minor_ver;
struct memory_type_mapping *mem_type_mapping_tbl;
u8 num_mem_types;
- u8 curr_mem_idx;
void *drv_info_dump;
u32 drv_info_size;
bool scan_chan_gap_enabled;
@@ -1434,7 +1484,8 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv,
u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
u8 rx_rate, u8 ht_info);
-void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index bcc7751d883c..77b9055a2d14 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -57,7 +57,7 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
if (pci_dma_mapping_error(card->dev, mapping.addr)) {
- dev_err(adapter->dev, "failed to map pci memory!\n");
+ mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n");
return -1;
}
mapping.len = size;
@@ -89,8 +89,9 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
if (card->sleep_cookie_vbase) {
cookie_addr = (u32 *)card->sleep_cookie_vbase;
- dev_dbg(adapter->dev, "info: ACCESS_HW: sleep cookie=0x%x\n",
- *cookie_addr);
+ mwifiex_dbg(adapter, INFO,
+ "info: ACCESS_HW: sleep cookie=0x%x\n",
+ *cookie_addr);
if (*cookie_addr == FW_AWAKE_COOKIE)
return true;
}
@@ -164,7 +165,8 @@ static int mwifiex_pcie_resume(struct device *dev)
adapter = card->adapter;
if (!adapter->is_suspended) {
- dev_warn(adapter->dev, "Device already resumed\n");
+ mwifiex_dbg(adapter, WARN,
+ "Device already resumed\n");
return 0;
}
@@ -361,16 +363,16 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
sleep_cookie = *(u32 *)buffer;
if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
- dev_dbg(adapter->dev,
- "sleep cookie found at count %d\n", count);
+ mwifiex_dbg(adapter, INFO,
+ "sleep cookie found at count %d\n", count);
break;
}
usleep_range(20, 30);
}
if (count >= max_delay_loop_cnt)
- dev_dbg(adapter->dev,
- "max count reached while accessing sleep cookie\n");
+ mwifiex_dbg(adapter, INFO,
+ "max count reached while accessing sleep cookie\n");
}
/* This function wakes up the card by reading fw_status register. */
@@ -380,20 +382,23 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- dev_dbg(adapter->dev, "event: Wakeup device...\n");
+ mwifiex_dbg(adapter, EVENT,
+ "event: Wakeup device...\n");
if (reg->sleep_cookie)
mwifiex_pcie_dev_wakeup_delay(adapter);
/* Reading fw_status register will wakeup device */
if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) {
- dev_warn(adapter->dev, "Reading fw_status register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Reading fw_status register failed\n");
return -1;
}
if (reg->sleep_cookie) {
mwifiex_pcie_dev_wakeup_delay(adapter);
- dev_dbg(adapter->dev, "PCIE wakeup: Setting PS_STATE_AWAKE\n");
+ mwifiex_dbg(adapter, INFO,
+ "PCIE wakeup: Setting PS_STATE_AWAKE\n");
adapter->ps_state = PS_STATE_AWAKE;
}
@@ -407,7 +412,8 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
*/
static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
{
- dev_dbg(adapter->dev, "cmd: Wakeup device completed\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Wakeup device completed\n");
return 0;
}
@@ -423,7 +429,8 @@ static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter)
if (mwifiex_pcie_ok_to_access_hw(adapter)) {
if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
0x00000000)) {
- dev_warn(adapter->dev, "Disable host interrupt failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Disable host interrupt failed\n");
return -1;
}
}
@@ -443,7 +450,8 @@ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter)
/* Simply write the mask to the register */
if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
HOST_INTR_MASK)) {
- dev_warn(adapter->dev, "Enable host interrupt failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Enable host interrupt failed\n");
return -1;
}
}
@@ -499,8 +507,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
GFP_KERNEL | GFP_DMA);
if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for RX ring.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Unable to allocate skb for RX ring.\n");
kfree(card->rxbd_ring_vbase);
return -ENOMEM;
}
@@ -512,10 +520,10 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
- dev_dbg(adapter->dev,
- "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
- skb, skb->len, skb->data, (u32)buf_pa,
- (u32)((u64)buf_pa >> 32));
+ mwifiex_dbg(adapter, INFO,
+ "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+ skb, skb->len, skb->data, (u32)buf_pa,
+ (u32)((u64)buf_pa >> 32));
card->rx_buf_list[i] = skb;
if (reg->pfu_enabled) {
@@ -556,8 +564,8 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
/* Allocate skb here so that firmware can DMA data from it */
skb = dev_alloc_skb(MAX_EVENT_SIZE);
if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for EVENT buf.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Unable to allocate skb for EVENT buf.\n");
kfree(card->evtbd_ring_vbase);
return -ENOMEM;
}
@@ -569,10 +577,10 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
- dev_dbg(adapter->dev,
- "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
- skb, skb->len, skb->data, (u32)buf_pa,
- (u32)((u64)buf_pa >> 32));
+ mwifiex_dbg(adapter, EVENT,
+ "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+ skb, skb->len, skb->data, (u32)buf_pa,
+ (u32)((u64)buf_pa >> 32));
card->evt_buf_list[i] = skb;
card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase +
@@ -715,21 +723,23 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
MWIFIEX_MAX_TXRX_BD;
- dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n",
- card->txbd_ring_size);
+ mwifiex_dbg(adapter, INFO,
+ "info: txbd_ring: Allocating %d bytes\n",
+ card->txbd_ring_size);
card->txbd_ring_vbase = pci_alloc_consistent(card->dev,
card->txbd_ring_size,
&card->txbd_ring_pbase);
if (!card->txbd_ring_vbase) {
- dev_err(adapter->dev,
- "allocate consistent memory (%d bytes) failed!\n",
- card->txbd_ring_size);
+ mwifiex_dbg(adapter, ERROR,
+ "allocate consistent memory (%d bytes) failed!\n",
+ card->txbd_ring_size);
return -ENOMEM;
}
- dev_dbg(adapter->dev,
- "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
- card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
- (u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size);
+ mwifiex_dbg(adapter, DATA,
+ "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
+ card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
+ (u32)((u64)card->txbd_ring_pbase >> 32),
+ card->txbd_ring_size);
return mwifiex_init_txq_ring(adapter);
}
@@ -777,23 +787,24 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
MWIFIEX_MAX_TXRX_BD;
- dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n",
- card->rxbd_ring_size);
+ mwifiex_dbg(adapter, INFO,
+ "info: rxbd_ring: Allocating %d bytes\n",
+ card->rxbd_ring_size);
card->rxbd_ring_vbase = pci_alloc_consistent(card->dev,
card->rxbd_ring_size,
&card->rxbd_ring_pbase);
if (!card->rxbd_ring_vbase) {
- dev_err(adapter->dev,
- "allocate consistent memory (%d bytes) failed!\n",
- card->rxbd_ring_size);
+ mwifiex_dbg(adapter, ERROR,
+ "allocate consistent memory (%d bytes) failed!\n",
+ card->rxbd_ring_size);
return -ENOMEM;
}
- dev_dbg(adapter->dev,
- "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
- card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase,
- (u32)((u64)card->rxbd_ring_pbase >> 32),
- card->rxbd_ring_size);
+ mwifiex_dbg(adapter, DATA,
+ "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
+ card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase,
+ (u32)((u64)card->rxbd_ring_pbase >> 32),
+ card->rxbd_ring_size);
return mwifiex_init_rxq_ring(adapter);
}
@@ -840,23 +851,24 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) *
MWIFIEX_MAX_EVT_BD;
- dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n",
+ mwifiex_dbg(adapter, INFO,
+ "info: evtbd_ring: Allocating %d bytes\n",
card->evtbd_ring_size);
card->evtbd_ring_vbase = pci_alloc_consistent(card->dev,
card->evtbd_ring_size,
&card->evtbd_ring_pbase);
if (!card->evtbd_ring_vbase) {
- dev_err(adapter->dev,
- "allocate consistent memory (%d bytes) failed!\n",
- card->evtbd_ring_size);
+ mwifiex_dbg(adapter, ERROR,
+ "allocate consistent memory (%d bytes) failed!\n",
+ card->evtbd_ring_size);
return -ENOMEM;
}
- dev_dbg(adapter->dev,
- "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n",
- card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase,
- (u32)((u64)card->evtbd_ring_pbase >> 32),
- card->evtbd_ring_size);
+ mwifiex_dbg(adapter, EVENT,
+ "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n",
+ card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase,
+ (u32)((u64)card->evtbd_ring_pbase >> 32),
+ card->evtbd_ring_size);
return mwifiex_pcie_init_evt_ring(adapter);
}
@@ -895,8 +907,8 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
/* Allocate memory for receiving command response data */
skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
if (!skb) {
- dev_err(adapter->dev,
- "Unable to allocate skb for command response data.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Unable to allocate skb for command response data.\n");
return -ENOMEM;
}
skb_put(skb, MWIFIEX_UPLD_SIZE);
@@ -944,14 +956,16 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32),
&card->sleep_cookie_pbase);
if (!card->sleep_cookie_vbase) {
- dev_err(adapter->dev, "pci_alloc_consistent failed!\n");
+ mwifiex_dbg(adapter, ERROR,
+ "pci_alloc_consistent failed!\n");
return -ENOMEM;
}
/* Init val of Sleep Cookie */
*(u32 *)card->sleep_cookie_vbase = FW_AWAKE_COOKIE;
- dev_dbg(adapter->dev, "alloc_scook: sleep cookie=0x%x\n",
- *((u32 *)card->sleep_cookie_vbase));
+ mwifiex_dbg(adapter, INFO,
+ "alloc_scook: sleep cookie=0x%x\n",
+ *((u32 *)card->sleep_cookie_vbase));
return 0;
}
@@ -993,8 +1007,8 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
*/
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DNLD_RDY)) {
- dev_err(adapter->dev,
- "failed to assert dnld-rdy interrupt.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "failed to assert dnld-rdy interrupt.\n");
return -1;
}
}
@@ -1018,13 +1032,14 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
/* Read the TX ring read pointer set by firmware */
if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
- dev_err(adapter->dev,
- "SEND COMP: failed to read reg->tx_rdptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SEND COMP: failed to read reg->tx_rdptr\n");
return -1;
}
- dev_dbg(adapter->dev, "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
- card->txbd_rdptr, rdptr);
+ mwifiex_dbg(adapter, DATA,
+ "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
+ card->txbd_rdptr, rdptr);
num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
/* free from previous txbd_rdptr to current txbd_rdptr */
@@ -1038,9 +1053,9 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
skb = card->tx_buf_list[wrdoneidx];
if (skb) {
- dev_dbg(adapter->dev,
- "SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
- skb, wrdoneidx);
+ mwifiex_dbg(adapter, DATA,
+ "SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
+ skb, wrdoneidx);
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_TODEVICE);
@@ -1112,8 +1127,9 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
__le16 *tmp;
if (!(skb->data && skb->len)) {
- dev_err(adapter->dev, "%s(): invalid parameter <%p, %#x>\n",
- __func__, skb->data, skb->len);
+ mwifiex_dbg(adapter, ERROR,
+ "%s(): invalid parameter <%p, %#x>\n",
+ __func__, skb->data, skb->len);
return -1;
}
@@ -1121,7 +1137,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
mwifiex_pm_wakeup_card(adapter);
num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
- dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
+ mwifiex_dbg(adapter, DATA,
+ "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
card->txbd_rdptr, card->txbd_wrptr);
if (mwifiex_pcie_txbd_not_full(card)) {
u8 *payload;
@@ -1175,39 +1192,40 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
/* Write the TX ring write pointer in to reg->tx_wrptr */
if (mwifiex_write_reg(adapter, reg->tx_wrptr,
card->txbd_wrptr | rx_val)) {
- dev_err(adapter->dev,
- "SEND DATA: failed to write reg->tx_wrptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SEND DATA: failed to write reg->tx_wrptr\n");
ret = -1;
goto done_unmap;
}
if ((mwifiex_pcie_txbd_not_full(card)) &&
tx_param->next_pkt_len) {
/* have more packets and TxBD still can hold more */
- dev_dbg(adapter->dev,
- "SEND DATA: delay dnld-rdy interrupt.\n");
+ mwifiex_dbg(adapter, DATA,
+ "SEND DATA: delay dnld-rdy interrupt.\n");
adapter->data_sent = false;
} else {
/* Send the TX ready interrupt */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DNLD_RDY)) {
- dev_err(adapter->dev,
- "SEND DATA: failed to assert dnld-rdy interrupt.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SEND DATA: failed to assert dnld-rdy interrupt.\n");
ret = -1;
goto done_unmap;
}
}
- dev_dbg(adapter->dev, "info: SEND DATA: Updated <Rd: %#x, Wr: "
- "%#x> and sent packet to firmware successfully\n",
- card->txbd_rdptr, card->txbd_wrptr);
+ mwifiex_dbg(adapter, DATA,
+ "info: SEND DATA: Updated <Rd: %#x, Wr:\t"
+ "%#x> and sent packet to firmware successfully\n",
+ card->txbd_rdptr, card->txbd_wrptr);
} else {
- dev_dbg(adapter->dev,
- "info: TX Ring full, can't send packets to fw\n");
+ mwifiex_dbg(adapter, DATA,
+ "info: TX Ring full, can't send packets to fw\n");
adapter->data_sent = true;
/* Send the TX ready interrupt */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DNLD_RDY))
- dev_err(adapter->dev,
- "SEND DATA: failed to assert door-bell intr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SEND DATA: failed to assert door-bell intr\n");
return -EBUSY;
}
@@ -1243,8 +1261,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
/* Read the RX ring Write pointer set by firmware */
if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
- dev_err(adapter->dev,
- "RECV DATA: failed to read reg->rx_wrptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "RECV DATA: failed to read reg->rx_wrptr\n");
ret = -1;
goto done;
}
@@ -1277,15 +1295,15 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
rx_len = le16_to_cpu(pkt_len);
if (WARN_ON(rx_len <= INTF_HEADER_LEN ||
rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) {
- dev_err(adapter->dev,
- "Invalid RX len %d, Rd=%#x, Wr=%#x\n",
- rx_len, card->rxbd_rdptr, wrptr);
+ mwifiex_dbg(adapter, ERROR,
+ "Invalid RX len %d, Rd=%#x, Wr=%#x\n",
+ rx_len, card->rxbd_rdptr, wrptr);
dev_kfree_skb_any(skb_data);
} else {
skb_put(skb_data, rx_len);
- dev_dbg(adapter->dev,
- "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
- card->rxbd_rdptr, wrptr, rx_len);
+ mwifiex_dbg(adapter, DATA,
+ "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
+ card->rxbd_rdptr, wrptr, rx_len);
skb_pull(skb_data, INTF_HEADER_LEN);
if (adapter->rx_work_enabled) {
skb_queue_tail(&adapter->rx_data_q, skb_data);
@@ -1299,8 +1317,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
GFP_KERNEL | GFP_DMA);
if (!skb_tmp) {
- dev_err(adapter->dev,
- "Unable to allocate skb.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Unable to allocate skb.\n");
return -ENOMEM;
}
@@ -1311,9 +1329,9 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
- dev_dbg(adapter->dev,
- "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
- skb_tmp, rd_index);
+ mwifiex_dbg(adapter, INFO,
+ "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
+ skb_tmp, rd_index);
card->rx_buf_list[rd_index] = skb_tmp;
if (reg->pfu_enabled) {
@@ -1336,28 +1354,29 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
reg->rx_rollover_ind) ^
reg->rx_rollover_ind);
}
- dev_dbg(adapter->dev, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
- card->rxbd_rdptr, wrptr);
+ mwifiex_dbg(adapter, DATA,
+ "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
+ card->rxbd_rdptr, wrptr);
tx_val = card->txbd_wrptr & reg->tx_wrap_mask;
/* Write the RX ring read pointer in to reg->rx_rdptr */
if (mwifiex_write_reg(adapter, reg->rx_rdptr,
card->rxbd_rdptr | tx_val)) {
- dev_err(adapter->dev,
- "RECV DATA: failed to write reg->rx_rdptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "RECV DATA: failed to write reg->rx_rdptr\n");
ret = -1;
goto done;
}
/* Read the RX ring Write pointer set by firmware */
if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
- dev_err(adapter->dev,
- "RECV DATA: failed to read reg->rx_wrptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "RECV DATA: failed to read reg->rx_wrptr\n");
ret = -1;
goto done;
}
- dev_dbg(adapter->dev,
- "info: RECV DATA: Rcvd packet from fw successfully\n");
+ mwifiex_dbg(adapter, DATA,
+ "info: RECV DATA: Rcvd packet from fw successfully\n");
card->rxbd_wrptr = wrptr;
}
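A note on the <Rd, Wr> pairs logged above: the ring indices carry a rollover indicator bit so that equal read and write indices can be told apart as empty versus full. Which polarity means which depends on how the indicator is initialized, so the sketch below (illustrative names, not driver code) shows only the generic scheme:

static bool ring_wrapped(u32 rd, u32 wr, u32 idx_mask, u32 roll_bit)
{
	/* equal indices alone are ambiguous: empty if both sides have
	 * wrapped the same number of times, full if one side has
	 * wrapped once more than the other */
	return ((rd & idx_mask) == (wr & idx_mask)) &&
	       ((rd & roll_bit) != (wr & roll_bit));
}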
@@ -1376,9 +1395,9 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (!(skb->data && skb->len)) {
- dev_err(adapter->dev,
- "Invalid parameter in %s <%p. len %d>\n",
- __func__, skb->data, skb->len);
+ mwifiex_dbg(adapter, ERROR,
+ "Invalid parameter in %s <%p. len %d>\n",
+ __func__, skb->data, skb->len);
return -1;
}
@@ -1391,9 +1410,9 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
* address scratch register
*/
if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) {
- dev_err(adapter->dev,
- "%s: failed to write download command to boot code.\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to write download command to boot code.\n",
+ __func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1403,18 +1422,18 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
*/
if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
(u32)((u64)buf_pa >> 32))) {
- dev_err(adapter->dev,
- "%s: failed to write download command to boot code.\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to write download command to boot code.\n",
+ __func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
/* Write the command length to cmd_size scratch register */
if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) {
- dev_err(adapter->dev,
- "%s: failed to write command len to cmd_size scratch reg\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to write command len to cmd_size scratch reg\n",
+ __func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1422,8 +1441,8 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Ring the door bell */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DOOR_BELL)) {
- dev_err(adapter->dev,
- "%s: failed to assert door-bell intr\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to assert door-bell intr\n", __func__);
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
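The boot-command path above is a fixed handshake: publish the buffer's DMA address through two 32-bit scratch registers (low word first), write the length, then ring the doorbell. A hypothetical helper for the address split; write_dma_addr() is not a driver function, while lower_32_bits()/upper_32_bits() are standard kernel helpers:

static int write_dma_addr(struct mwifiex_adapter *adapter,
			  u32 reg_lo, u32 reg_hi, u64 addr)
{
	/* low word first, then high word, mirroring the hunks above */
	if (mwifiex_write_reg(adapter, reg_lo, lower_32_bits(addr)))
		return -1;
	return mwifiex_write_reg(adapter, reg_hi, upper_32_bits(addr));
}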
@@ -1443,8 +1462,8 @@ static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter)
/* Write the RX ring read pointer into reg->rx_rdptr */
if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr |
tx_wrap)) {
- dev_err(adapter->dev,
- "RECV DATA: failed to write reg->rx_rdptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "RECV DATA: failed to write reg->rx_rdptr\n");
return -1;
}
return 0;
@@ -1462,15 +1481,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
u8 *payload = (u8 *)skb->data;
if (!(skb->data && skb->len)) {
- dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x>\n",
- __func__, skb->data, skb->len);
+ mwifiex_dbg(adapter, ERROR,
+ "Invalid parameter in %s <%p, %#x>\n",
+ __func__, skb->data, skb->len);
return -1;
}
/* Make sure a command response buffer is available */
if (!card->cmdrsp_buf) {
- dev_err(adapter->dev,
- "No response buffer available, send command failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "No response buffer available, send command failed\n");
return -EBUSY;
}
@@ -1503,8 +1523,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
address */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
(u32)cmdrsp_buf_pa)) {
- dev_err(adapter->dev,
- "Failed to write download cmd to boot code.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
@@ -1512,8 +1532,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
address */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi,
(u32)((u64)cmdrsp_buf_pa >> 32))) {
- dev_err(adapter->dev,
- "Failed to write download cmd to boot code.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
@@ -1523,16 +1543,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
(u32)cmd_buf_pa)) {
- dev_err(adapter->dev,
- "Failed to write download cmd to boot code.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
/* Write the upper 32bits of the physical address to reg->cmd_addr_hi */
if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
(u32)((u64)cmd_buf_pa >> 32))) {
- dev_err(adapter->dev,
- "Failed to write download cmd to boot code.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write download cmd to boot code.\n");
ret = -1;
goto done;
}
@@ -1540,8 +1560,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Write the command length to reg->cmd_size */
if (mwifiex_write_reg(adapter, reg->cmd_size,
card->cmd_buf->len)) {
- dev_err(adapter->dev,
- "Failed to write cmd len to reg->cmd_size\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write cmd len to reg->cmd_size\n");
ret = -1;
goto done;
}
@@ -1549,8 +1569,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
/* Ring the door bell */
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_DOOR_BELL)) {
- dev_err(adapter->dev,
- "Failed to assert door-bell intr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to assert door-bell intr\n");
ret = -1;
goto done;
}
@@ -1574,7 +1594,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
u16 rx_len;
__le16 pkt_len;
- dev_dbg(adapter->dev, "info: Rx CMD Response\n");
+ mwifiex_dbg(adapter, CMD,
+ "info: Rx CMD Response\n");
mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
@@ -1598,8 +1619,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (mwifiex_write_reg(adapter,
PCIE_CPU_INT_EVENT,
CPU_INTR_SLEEP_CFM_DONE)) {
- dev_warn(adapter->dev,
- "Write register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Write register failed\n");
return -1;
}
mwifiex_delay_for_sleep_cookie(adapter,
@@ -1608,8 +1629,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
} else {
- dev_err(adapter->dev,
- "There is no command but got cmdrsp\n");
+ mwifiex_dbg(adapter, ERROR,
+ "There is no command but got cmdrsp\n");
}
memcpy(adapter->upld_buf, skb->data,
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
@@ -1628,15 +1649,15 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
will prevent firmware from writing to the same response
buffer again. */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) {
- dev_err(adapter->dev,
- "cmd_done: failed to clear cmd_rsp_addr_lo\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cmd_done: failed to clear cmd_rsp_addr_lo\n");
return -1;
}
/* Write the upper 32bits of the cmdrsp buffer physical
address */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) {
- dev_err(adapter->dev,
- "cmd_done: failed to clear cmd_rsp_addr_hi\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cmd_done: failed to clear cmd_rsp_addr_hi\n");
return -1;
}
}
@@ -1678,25 +1699,28 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
mwifiex_pm_wakeup_card(adapter);
if (adapter->event_received) {
- dev_dbg(adapter->dev, "info: Event being processed, "
- "do not process this interrupt just yet\n");
+ mwifiex_dbg(adapter, EVENT,
+ "info: Event being processed,\t"
+ "do not process this interrupt just yet\n");
return 0;
}
if (rdptr >= MWIFIEX_MAX_EVT_BD) {
- dev_dbg(adapter->dev, "info: Invalid read pointer...\n");
+ mwifiex_dbg(adapter, ERROR,
+ "info: Invalid read pointer...\n");
return -1;
}
/* Read the event ring write pointer set by firmware */
if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
- dev_err(adapter->dev,
- "EventReady: failed to read reg->evt_wrptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "EventReady: failed to read reg->evt_wrptr\n");
return -1;
}
- dev_dbg(adapter->dev, "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>",
- card->evtbd_rdptr, wrptr);
+ mwifiex_dbg(adapter, EVENT,
+ "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>",
+ card->evtbd_rdptr, wrptr);
if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr
& MWIFIEX_EVTBD_MASK)) ||
((wrptr & reg->evt_rollover_ind) ==
@@ -1705,7 +1729,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
__le16 data_len = 0;
u16 evt_len;
- dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
+ mwifiex_dbg(adapter, INFO,
+ "info: Read Index: %d\n", rdptr);
skb_cmd = card->evt_buf_list[rdptr];
mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
@@ -1721,9 +1746,10 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
len is 2 bytes followed by type which is 2 bytes */
memcpy(&data_len, skb_cmd->data, sizeof(__le16));
evt_len = le16_to_cpu(data_len);
-
+ skb_trim(skb_cmd, evt_len);
skb_pull(skb_cmd, INTF_HEADER_LEN);
- dev_dbg(adapter->dev, "info: Event length: %d\n", evt_len);
+ mwifiex_dbg(adapter, EVENT,
+ "info: Event length: %d\n", evt_len);
if ((evt_len > 0) && (evt_len < MAX_EVENT_SIZE))
memcpy(adapter->event_body, skb_cmd->data +
@@ -1740,8 +1766,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
} else {
if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
CPU_INTR_EVENT_DONE)) {
- dev_warn(adapter->dev,
- "Write register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Write register failed\n");
return -1;
}
}
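The skb_trim() added above matters because event buffers are recycled DMA buffers of up to MAX_EVENT_SIZE bytes: the firmware reports the valid length in the leading __le16, and anything past it is stale data from a previous event. A hedged restatement of the ordering, with a bounds guard that is illustrative rather than part of the patch:

	if (evt_len < INTF_HEADER_LEN || evt_len > MAX_EVENT_SIZE)
		return -EINVAL;			/* hypothetical sanity check */
	skb_trim(skb_cmd, evt_len);		/* drop stale trailing bytes */
	skb_pull(skb_cmd, INTF_HEADER_LEN);	/* event body now at skb->data */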
@@ -1766,15 +1792,16 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
return 0;
if (rdptr >= MWIFIEX_MAX_EVT_BD) {
- dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
- rdptr);
+ mwifiex_dbg(adapter, ERROR,
+ "event_complete: Invalid rdptr 0x%x\n",
+ rdptr);
return -EINVAL;
}
/* Read the event ring write pointer set by firmware */
if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
- dev_err(adapter->dev,
- "event_complete: failed to read reg->evt_wrptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "event_complete: failed to read reg->evt_wrptr\n");
return -1;
}
@@ -1791,9 +1818,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
desc->flags = 0;
skb = NULL;
} else {
- dev_dbg(adapter->dev,
- "info: ERROR: buf still valid at index %d, <%p, %p>\n",
- rdptr, card->evt_buf_list[rdptr], skb);
+ mwifiex_dbg(adapter, ERROR,
+ "info: ERROR: buf still valid at index %d, <%p, %p>\n",
+ rdptr, card->evt_buf_list[rdptr], skb);
}
if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) {
@@ -1802,18 +1829,20 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
reg->evt_rollover_ind);
}
- dev_dbg(adapter->dev, "info: Updated <Rd: 0x%x, Wr: 0x%x>",
- card->evtbd_rdptr, wrptr);
+ mwifiex_dbg(adapter, EVENT,
+ "info: Updated <Rd: 0x%x, Wr: 0x%x>",
+ card->evtbd_rdptr, wrptr);
/* Write the event ring read pointer into reg->evt_rdptr */
if (mwifiex_write_reg(adapter, reg->evt_rdptr,
card->evtbd_rdptr)) {
- dev_err(adapter->dev,
- "event_complete: failed to read reg->evt_rdptr\n");
+ mwifiex_dbg(adapter, ERROR,
+ "event_complete: failed to read reg->evt_rdptr\n");
return -1;
}
- dev_dbg(adapter->dev, "info: Check Events Again\n");
+ mwifiex_dbg(adapter, EVENT,
+ "info: Check Events Again\n");
ret = mwifiex_pcie_process_event_ready(adapter);
return ret;
@@ -1840,17 +1869,18 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (!firmware || !firmware_len) {
- dev_err(adapter->dev,
- "No firmware image found! Terminating download\n");
+ mwifiex_dbg(adapter, ERROR,
+ "No firmware image found! Terminating download\n");
return -1;
}
- dev_dbg(adapter->dev, "info: Downloading FW image (%d bytes)\n",
- firmware_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: Downloading FW image (%d bytes)\n",
+ firmware_len);
if (mwifiex_pcie_disable_host_int(adapter)) {
- dev_err(adapter->dev,
- "%s: Disabling interrupts failed.\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: Disabling interrupts failed.\n", __func__);
return -1;
}
@@ -1872,8 +1902,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = mwifiex_read_reg(adapter, reg->cmd_size,
&len);
if (ret) {
- dev_warn(adapter->dev,
- "Failed reading len from boot code\n");
+ mwifiex_dbg(adapter, FATAL,
+ "Failed reading len from boot code\n");
goto done;
}
if (len)
@@ -1884,8 +1914,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (!len) {
break;
} else if (len > MWIFIEX_UPLD_SIZE) {
- pr_err("FW download failure @ %d, invalid length %d\n",
- offset, len);
+ mwifiex_dbg(adapter, ERROR,
+ "FW download failure @ %d, invalid length %d\n",
+ offset, len);
ret = -1;
goto done;
}
@@ -1895,14 +1926,16 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (len & BIT(0)) {
block_retry_cnt++;
if (block_retry_cnt > MAX_WRITE_IOMEM_RETRY) {
- pr_err("FW download failure @ %d, over max "
- "retry count\n", offset);
+ mwifiex_dbg(adapter, ERROR,
+ "FW download failure @ %d, over max\t"
+ "retry count\n", offset);
ret = -1;
goto done;
}
- dev_err(adapter->dev, "FW CRC error indicated by the "
- "helper: len = 0x%04X, txlen = %d\n",
- len, txlen);
+ mwifiex_dbg(adapter, ERROR,
+ "FW CRC error indicated by the\t"
+ "helper: len = 0x%04X, txlen = %d\n",
+ len, txlen);
len &= ~BIT(0);
/* Setting this to 0 to resend from same offset */
txlen = 0;
@@ -1913,7 +1946,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (firmware_len - offset < txlen)
txlen = firmware_len - offset;
- dev_dbg(adapter->dev, ".");
+ mwifiex_dbg(adapter, INFO, ".");
tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
card->pcie.blksz_fw_dl;
@@ -1927,8 +1960,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
/* Send the boot command to device */
if (mwifiex_pcie_send_boot_cmd(adapter, skb)) {
- dev_err(adapter->dev,
- "Failed to send firmware download command\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to send firmware download command\n");
ret = -1;
goto done;
}
@@ -1937,9 +1970,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
do {
if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
&ireg_intr)) {
- dev_err(adapter->dev, "%s: Failed to read "
- "interrupt status during fw dnld.\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: Failed to read\t"
+ "interrupt status during fw dnld.\n",
+ __func__);
mwifiex_unmap_pci_memory(adapter, skb,
PCI_DMA_TODEVICE);
ret = -1;
@@ -1953,8 +1987,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
offset += txlen;
} while (true);
- dev_notice(adapter->dev,
- "info: FW download over, size %d bytes\n", offset);
+ mwifiex_dbg(adapter, MSG,
+ "info: FW download over, size %d bytes\n", offset);
ret = 0;
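Two details of the download loop are worth spelling out. A set low bit in the length reported by the boot code signals a CRC failure on the previous chunk, which is resent from the same offset up to MAX_WRITE_IOMEM_RETRY times; and the (txlen + blksz - 1) / blksz block count is the open-coded form of DIV_ROUND_UP(). A compact restatement of the hunks above (sketch, not new driver code):

	if (len & BIT(0)) {			/* CRC error from boot code */
		if (++block_retry_cnt > MAX_WRITE_IOMEM_RETRY)
			return -1;
		len &= ~BIT(0);			/* the length itself is valid */
		txlen = 0;			/* 0 = resend the buffered chunk */
	}
	tx_blocks = DIV_ROUND_UP(txlen, card->pcie.blksz_fw_dl);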
@@ -1980,15 +2014,17 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
/* Mask spurious interrupts */
if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK,
HOST_INTR_MASK)) {
- dev_warn(adapter->dev, "Write register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Write register failed\n");
return -1;
}
- dev_dbg(adapter->dev, "Setting driver ready signature\n");
+ mwifiex_dbg(adapter, INFO,
+ "Setting driver ready signature\n");
if (mwifiex_write_reg(adapter, reg->drv_rdy,
FIRMWARE_READY_PCIE)) {
- dev_err(adapter->dev,
- "Failed to write driver ready signature\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write driver ready signature\n");
return -1;
}
@@ -2015,12 +2051,13 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
&winner_status))
ret = -1;
else if (!winner_status) {
- dev_err(adapter->dev, "PCI-E is the winner\n");
+ mwifiex_dbg(adapter, INFO,
+ "PCI-E is the winner\n");
adapter->winner = 1;
} else {
- dev_err(adapter->dev,
- "PCI-E is not the winner <%#x,%d>, exit dnld\n",
- ret, adapter->winner);
+ mwifiex_dbg(adapter, ERROR,
+ "PCI-E is not the winner <%#x,%d>, exit dnld\n",
+ ret, adapter->winner);
}
}
@@ -2039,7 +2076,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
return;
if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) {
- dev_warn(adapter->dev, "Read register failed\n");
+ mwifiex_dbg(adapter, ERROR, "Read register failed\n");
return;
}
@@ -2050,7 +2087,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
/* Clear the pending interrupts */
if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS,
~pcie_ireg)) {
- dev_warn(adapter->dev, "Write register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Write register failed\n");
return;
}
spin_lock_irqsave(&adapter->int_lock, flags);
@@ -2133,21 +2171,24 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
while (pcie_ireg & HOST_INTR_MASK) {
if (pcie_ireg & HOST_INTR_DNLD_DONE) {
pcie_ireg &= ~HOST_INTR_DNLD_DONE;
- dev_dbg(adapter->dev, "info: TX DNLD Done\n");
+ mwifiex_dbg(adapter, INTR,
+ "info: TX DNLD Done\n");
ret = mwifiex_pcie_send_data_complete(adapter);
if (ret)
return ret;
}
if (pcie_ireg & HOST_INTR_UPLD_RDY) {
pcie_ireg &= ~HOST_INTR_UPLD_RDY;
- dev_dbg(adapter->dev, "info: Rx DATA\n");
+ mwifiex_dbg(adapter, INTR,
+ "info: Rx DATA\n");
ret = mwifiex_pcie_process_recv_data(adapter);
if (ret)
return ret;
}
if (pcie_ireg & HOST_INTR_EVENT_RDY) {
pcie_ireg &= ~HOST_INTR_EVENT_RDY;
- dev_dbg(adapter->dev, "info: Rx EVENT\n");
+ mwifiex_dbg(adapter, INTR,
+ "info: Rx EVENT\n");
ret = mwifiex_pcie_process_event_ready(adapter);
if (ret)
return ret;
@@ -2156,8 +2197,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (pcie_ireg & HOST_INTR_CMD_DONE) {
pcie_ireg &= ~HOST_INTR_CMD_DONE;
if (adapter->cmd_sent) {
- dev_dbg(adapter->dev,
- "info: CMD sent Interrupt\n");
+ mwifiex_dbg(adapter, INTR,
+ "info: CMD sent Interrupt\n");
adapter->cmd_sent = false;
}
/* Handle command response */
@@ -2169,8 +2210,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (mwifiex_pcie_ok_to_access_hw(adapter)) {
if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
&pcie_ireg)) {
- dev_warn(adapter->dev,
- "Read register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Read register failed\n");
return -1;
}
@@ -2178,16 +2219,17 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (mwifiex_write_reg(adapter,
PCIE_HOST_INT_STATUS,
~pcie_ireg)) {
- dev_warn(adapter->dev,
- "Write register failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Write register failed\n");
return -1;
}
}
}
}
- dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
- adapter->cmd_sent, adapter->data_sent);
+ mwifiex_dbg(adapter, INTR,
+ "info: cmd_sent=%d data_sent=%d\n",
+ adapter->cmd_sent, adapter->data_sent);
if (adapter->ps_state != PS_STATE_SLEEP)
mwifiex_pcie_enable_host_int(adapter);
@@ -2209,7 +2251,8 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
struct mwifiex_tx_param *tx_param)
{
if (!skb) {
- dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "Passed NULL skb to %s\n", __func__);
return -1;
}
@@ -2232,7 +2275,8 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY);
if (ret) {
- dev_err(adapter->dev, "PCIE write err\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PCIE write err\n");
return RDWR_STATUS_FAILURE;
}
@@ -2243,24 +2287,25 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
if (doneflag && ctrl_data == doneflag)
return RDWR_STATUS_DONE;
if (ctrl_data != FW_DUMP_HOST_READY) {
- dev_info(adapter->dev,
- "The ctrl reg was changed, re-try again!\n");
+ mwifiex_dbg(adapter, WARN,
+ "The ctrl reg was changed, re-try again!\n");
ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
FW_DUMP_HOST_READY);
if (ret) {
- dev_err(adapter->dev, "PCIE write err\n");
+ mwifiex_dbg(adapter, ERROR,
+ "PCIE write err\n");
return RDWR_STATUS_FAILURE;
}
}
usleep_range(100, 200);
}
- dev_err(adapter->dev, "Fail to pull ctrl_data\n");
+ mwifiex_dbg(adapter, ERROR, "Fail to pull ctrl_data\n");
return RDWR_STATUS_FAILURE;
}
/* This function dump firmware memory to file */
-static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
+static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *creg = card->pcie.reg;
@@ -2269,7 +2314,6 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
enum rdwr_status stat;
u32 memory_size;
int ret;
- static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL };
if (!card->pcie.can_dump_fw)
return;
@@ -2284,12 +2328,12 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
entry->mem_size = 0;
}
- dev_info(adapter->dev, "== mwifiex firmware dump start ==\n");
+ mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n");
/* Read the number of the memories which will dump */
stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
if (stat == RDWR_STATUS_FAILURE)
- goto done;
+ return;
reg = creg->fw_dump_start;
mwifiex_read_reg_byte(adapter, reg, &dump_num);
@@ -2300,7 +2344,7 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
if (stat == RDWR_STATUS_FAILURE)
- goto done;
+ return;
memory_size = 0;
reg = creg->fw_dump_start;
@@ -2311,36 +2355,36 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
}
if (memory_size == 0) {
- dev_info(adapter->dev, "Firmware dump Finished!\n");
+ mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n");
ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl,
FW_DUMP_READ_DONE);
if (ret) {
- dev_err(adapter->dev, "PCIE write err\n");
- goto done;
+ mwifiex_dbg(adapter, ERROR, "PCIE write err\n");
+ return;
}
break;
}
- dev_info(adapter->dev,
- "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
+ mwifiex_dbg(adapter, DUMP,
+ "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
entry->mem_ptr = vmalloc(memory_size + 1);
entry->mem_size = memory_size;
if (!entry->mem_ptr) {
- dev_err(adapter->dev,
- "Vmalloc %s failed\n", entry->mem_name);
- goto done;
+ mwifiex_dbg(adapter, ERROR,
+ "Vmalloc %s failed\n", entry->mem_name);
+ return;
}
dbg_ptr = entry->mem_ptr;
end_ptr = dbg_ptr + memory_size;
doneflag = entry->done_flag;
- dev_info(adapter->dev, "Start %s output, please wait...\n",
- entry->mem_name);
+ mwifiex_dbg(adapter, DUMP, "Start %s output, please wait...\n",
+ entry->mem_name);
do {
stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
if (RDWR_STATUS_FAILURE == stat)
- goto done;
+ return;
reg_start = creg->fw_dump_start;
reg_end = creg->fw_dump_end;
@@ -2349,46 +2393,49 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
if (dbg_ptr < end_ptr) {
dbg_ptr++;
} else {
- dev_err(adapter->dev,
- "Allocated buf not enough\n");
- goto done;
+ mwifiex_dbg(adapter, ERROR,
+ "Allocated buf not enough\n");
+ return;
}
}
if (stat != RDWR_STATUS_DONE)
continue;
- dev_info(adapter->dev, "%s done: size=0x%tx\n",
- entry->mem_name, dbg_ptr - entry->mem_ptr);
+ mwifiex_dbg(adapter, DUMP,
+ "%s done: size=0x%tx\n",
+ entry->mem_name, dbg_ptr - entry->mem_ptr);
break;
} while (true);
}
- dev_info(adapter->dev, "== mwifiex firmware dump end ==\n");
-
- kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
+ mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump end ==\n");
+}
-done:
- adapter->curr_mem_idx = 0;
+static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
+{
+ mwifiex_drv_info_dump(adapter);
+ mwifiex_pcie_fw_dump(adapter);
+ mwifiex_upload_device_dump(adapter);
}
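The rework above replaces the driver-local uevent with a three-stage device dump: collect driver state (mwifiex_drv_info_dump), pull the firmware memory segments (mwifiex_pcie_fw_dump), then hand the result to user space (mwifiex_upload_device_dump). The upload helper is defined elsewhere in the series; a hedged guess at its shape, assuming it feeds the devcoredump framework (dev_coredumpv() takes ownership of the buffer and frees it):

	void *dump = vzalloc(dump_len);		/* dump_len: assembled size */
	if (!dump)
		return;
	/* ... copy drv_info and each mem_type_mapping_tbl entry ... */
	dev_coredumpv(adapter->dev, dump, dump_len, GFP_KERNEL);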
static unsigned long iface_work_flags;
static struct mwifiex_adapter *save_adapter;
static void mwifiex_pcie_work(struct work_struct *work)
{
- if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
+ if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
&iface_work_flags))
- mwifiex_pcie_fw_dump_work(save_adapter);
+ mwifiex_pcie_device_dump_work(save_adapter);
}
static DECLARE_WORK(pcie_work, mwifiex_pcie_work);
/* This function dumps FW information */
-static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
+static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
{
save_adapter = adapter;
- if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
+ if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
return;
- set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
+ set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
schedule_work(&pcie_work);
}
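Routing the trigger through iface_work_flags and schedule_work() keeps the actual dump out of the caller's context: the trigger may arrive from an interrupt path, while the dump itself sleeps. One design note: the test_bit()/set_bit() pair above is not atomic as a unit, so a single test_and_set_bit() would close the small window in which two callers both pass the test, e.g.:

	if (test_and_set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
			     &iface_work_flags))
		return;				/* dump already queued */
	schedule_work(&pcie_work);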
@@ -2418,45 +2465,50 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
pci_set_master(pdev);
- dev_dbg(adapter->dev, "try set_consistent_dma_mask(32)\n");
+ mwifiex_dbg(adapter, INFO,
+ "try set_consistent_dma_mask(32)\n");
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- dev_err(adapter->dev, "set_dma_mask(32) failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "set_dma_mask(32) failed\n");
goto err_set_dma_mask;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- dev_err(adapter->dev, "set_consistent_dma_mask(64) failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "set_consistent_dma_mask(64) failed\n");
goto err_set_dma_mask;
}
ret = pci_request_region(pdev, 0, DRV_NAME);
if (ret) {
- dev_err(adapter->dev, "req_reg(0) error\n");
+ mwifiex_dbg(adapter, ERROR,
+ "req_reg(0) error\n");
goto err_req_region0;
}
card->pci_mmap = pci_iomap(pdev, 0, 0);
if (!card->pci_mmap) {
- dev_err(adapter->dev, "iomap(0) error\n");
+ mwifiex_dbg(adapter, ERROR, "iomap(0) error\n");
ret = -EIO;
goto err_iomap0;
}
ret = pci_request_region(pdev, 2, DRV_NAME);
if (ret) {
- dev_err(adapter->dev, "req_reg(2) error\n");
+ mwifiex_dbg(adapter, ERROR, "req_reg(2) error\n");
goto err_req_region2;
}
card->pci_mmap1 = pci_iomap(pdev, 2, 0);
if (!card->pci_mmap1) {
- dev_err(adapter->dev, "iomap(2) error\n");
+ mwifiex_dbg(adapter, ERROR,
+ "iomap(2) error\n");
ret = -EIO;
goto err_iomap2;
}
- dev_dbg(adapter->dev,
- "PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
- card->pci_mmap, card->pci_mmap1);
+ mwifiex_dbg(adapter, INFO,
+ "PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
+ card->pci_mmap, card->pci_mmap1);
card->cmdrsp_buf = NULL;
ret = mwifiex_pcie_create_txbd_ring(adapter);
@@ -2521,10 +2573,11 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
if (user_rmmod) {
- dev_dbg(adapter->dev, "Clearing driver ready signature\n");
+ mwifiex_dbg(adapter, INFO,
+ "Clearing driver ready signature\n");
if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
- dev_err(adapter->dev,
- "Failed to write driver not-ready signature\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to write driver not-ready signature\n");
}
if (pdev) {
@@ -2555,7 +2608,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED,
"MRVL_PCIE", pdev);
if (ret) {
- pr_err("request_irq failed: ret=%d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "request_irq failed: ret=%d\n", ret);
adapter->card = NULL;
return -1;
}
@@ -2582,7 +2636,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
const struct mwifiex_pcie_card_reg *reg;
if (card) {
- dev_dbg(adapter->dev, "%s(): calling free_irq()\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "%s(): calling free_irq()\n", __func__);
free_irq(card->dev->irq, card->dev);
reg = card->pcie.reg;
@@ -2617,7 +2672,7 @@ static struct mwifiex_if_ops pcie_ops = {
.cleanup_mpa_buf = NULL,
.init_fw_port = mwifiex_pcie_init_fw_port,
.clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
- .fw_dump = mwifiex_pcie_fw_dump,
+ .device_dump = mwifiex_pcie_device_dump,
};
/*
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 0ffdb7c5afd2..baf9715ddc10 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -241,20 +241,21 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv,
* LinkSys WRT54G && bss_desc->privacy
*/
) {
- dev_dbg(priv->adapter->dev, "info: %s: WPA:"
- " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
- "EncMode=%#x privacy=%#x\n", __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*(bss_desc->bcn_rsn_ie)).
- ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: %s: WPA:\t"
+ "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
+ "EncMode=%#x privacy=%#x\n", __func__,
+ (bss_desc->bcn_wpa_ie) ?
+ (*bss_desc->bcn_wpa_ie).
+ vend_hdr.element_id : 0,
+ (bss_desc->bcn_rsn_ie) ?
+ (*bss_desc->bcn_rsn_ie).
+ ieee_hdr.element_id : 0,
+ (priv->sec_info.wep_enabled) ? "e" : "d",
+ (priv->sec_info.wpa_enabled) ? "e" : "d",
+ (priv->sec_info.wpa2_enabled) ? "e" : "d",
+ priv->sec_info.encryption_mode,
+ bss_desc->privacy);
return true;
}
return false;
@@ -277,20 +278,21 @@ mwifiex_is_bss_wpa2(struct mwifiex_private *priv,
* Privacy bit may NOT be set in some APs like
* LinkSys WRT54G && bss_desc->privacy
*/
- dev_dbg(priv->adapter->dev, "info: %s: WPA2: "
- " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
- "EncMode=%#x privacy=%#x\n", __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*(bss_desc->bcn_rsn_ie)).
- ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: %s: WPA2:\t"
+ "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
+ "EncMode=%#x privacy=%#x\n", __func__,
+ (bss_desc->bcn_wpa_ie) ?
+ (*bss_desc->bcn_wpa_ie).
+ vend_hdr.element_id : 0,
+ (bss_desc->bcn_rsn_ie) ?
+ (*bss_desc->bcn_rsn_ie).
+ ieee_hdr.element_id : 0,
+ (priv->sec_info.wep_enabled) ? "e" : "d",
+ (priv->sec_info.wpa_enabled) ? "e" : "d",
+ (priv->sec_info.wpa2_enabled) ? "e" : "d",
+ priv->sec_info.encryption_mode,
+ bss_desc->privacy);
return true;
}
return false;
@@ -333,18 +335,19 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv,
((!bss_desc->bcn_rsn_ie) ||
((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
priv->sec_info.encryption_mode && bss_desc->privacy) {
- dev_dbg(priv->adapter->dev, "info: %s: dynamic "
- "WEP: wpa_ie=%#x wpa2_ie=%#x "
- "EncMode=%#x privacy=%#x\n",
- __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*(bss_desc->bcn_rsn_ie)).
- ieee_hdr.element_id : 0,
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: %s: dynamic\t"
+ "WEP: wpa_ie=%#x wpa2_ie=%#x\t"
+ "EncMode=%#x privacy=%#x\n",
+ __func__,
+ (bss_desc->bcn_wpa_ie) ?
+ (*bss_desc->bcn_wpa_ie).
+ vend_hdr.element_id : 0,
+ (bss_desc->bcn_rsn_ie) ?
+ (*bss_desc->bcn_rsn_ie).
+ ieee_hdr.element_id : 0,
+ priv->sec_info.encryption_mode,
+ bss_desc->privacy);
return true;
}
return false;
@@ -383,19 +386,20 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
return 0;
if (priv->wps.session_enable) {
- dev_dbg(adapter->dev,
- "info: return success directly in WPS period\n");
+ mwifiex_dbg(adapter, IOCTL,
+ "info: return success directly in WPS period\n");
return 0;
}
if (bss_desc->chan_sw_ie_present) {
- dev_err(adapter->dev,
- "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n");
+ mwifiex_dbg(adapter, INFO,
+ "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n");
return -1;
}
if (mwifiex_is_bss_wapi(priv, bss_desc)) {
- dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: return success for WAPI AP\n");
return 0;
}
@@ -405,7 +409,8 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
return 0;
} else if (mwifiex_is_bss_static_wep(priv, bss_desc)) {
/* Static WEP enabled */
- dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Disable 11n in WEP mode.\n");
bss_desc->disable_11n = true;
return 0;
} else if (mwifiex_is_bss_wpa(priv, bss_desc)) {
@@ -418,9 +423,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
if (mwifiex_is_wpa_oui_present
(bss_desc, CIPHER_SUITE_TKIP)) {
- dev_dbg(adapter->dev,
- "info: Disable 11n if AES "
- "is not supported by AP\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Disable 11n if AES\t"
+ "is not supported by AP\n");
bss_desc->disable_11n = true;
} else {
return -1;
@@ -437,9 +442,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
if (mwifiex_is_rsn_oui_present
(bss_desc, CIPHER_SUITE_TKIP)) {
- dev_dbg(adapter->dev,
- "info: Disable 11n if AES "
- "is not supported by AP\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Disable 11n if AES\t"
+ "is not supported by AP\n");
bss_desc->disable_11n = true;
} else {
return -1;
@@ -455,17 +460,18 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
}
/* Security doesn't match */
- dev_dbg(adapter->dev,
- "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s "
- "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode, bss_desc->privacy);
+ mwifiex_dbg(adapter, ERROR,
+ "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t"
+ "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n",
+ __func__,
+ (bss_desc->bcn_wpa_ie) ?
+ (*bss_desc->bcn_wpa_ie).vend_hdr.element_id : 0,
+ (bss_desc->bcn_rsn_ie) ?
+ (*bss_desc->bcn_rsn_ie).ieee_hdr.element_id : 0,
+ (priv->sec_info.wep_enabled) ? "e" : "d",
+ (priv->sec_info.wpa_enabled) ? "e" : "d",
+ (priv->sec_info.wpa2_enabled) ? "e" : "d",
+ priv->sec_info.encryption_mode, bss_desc->privacy);
return -1;
}
@@ -560,7 +566,8 @@ mwifiex_append_rate_tlv(struct mwifiex_private *priv,
else
rates_size = mwifiex_get_supported_rates(priv, rates);
- dev_dbg(priv->adapter->dev, "info: SCAN_CMD: Rates size = %d\n",
+ mwifiex_dbg(priv->adapter, CMD,
+ "info: SCAN_CMD: Rates size = %d\n",
rates_size);
rates_tlv = (struct mwifiex_ie_types_rates_param_set *)tlv_pos;
rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
@@ -600,9 +607,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
u8 radio_type;
if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) {
- dev_dbg(priv->adapter->dev,
- "info: Scan: Null detect: %p, %p, %p\n",
- scan_cfg_out, chan_tlv_out, scan_chan_list);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "info: Scan: Null detect: %p, %p, %p\n",
+ scan_cfg_out, chan_tlv_out, scan_chan_list);
return -1;
}
@@ -645,16 +652,16 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
}
radio_type = tmp_chan_list->radio_type;
- dev_dbg(priv->adapter->dev,
- "info: Scan: Chan(%3d), Radio(%d),"
- " Mode(%d, %d), Dur(%d)\n",
- tmp_chan_list->chan_number,
- tmp_chan_list->radio_type,
- tmp_chan_list->chan_scan_mode_bitmap
- & MWIFIEX_PASSIVE_SCAN,
- (tmp_chan_list->chan_scan_mode_bitmap
- & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
- le16_to_cpu(tmp_chan_list->max_scan_time));
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Scan: Chan(%3d), Radio(%d),\t"
+ "Mode(%d, %d), Dur(%d)\n",
+ tmp_chan_list->chan_number,
+ tmp_chan_list->radio_type,
+ tmp_chan_list->chan_scan_mode_bitmap
+ & MWIFIEX_PASSIVE_SCAN,
+ (tmp_chan_list->chan_scan_mode_bitmap
+ & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
+ le16_to_cpu(tmp_chan_list->max_scan_time));
/* Copy the current channel TLV to the command being
prepared */
@@ -718,9 +725,11 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* The total scan time should be less than scan command timeout
value */
if (total_scan_time > MWIFIEX_MAX_TOTAL_SCAN_TIME) {
- dev_err(priv->adapter->dev, "total scan time %dms"
- " is over limit (%dms), scan skipped\n",
- total_scan_time, MWIFIEX_MAX_TOTAL_SCAN_TIME);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "total scan time %dms\t"
+ "is over limit (%dms), scan skipped\n",
+ total_scan_time,
+ MWIFIEX_MAX_TOTAL_SCAN_TIME);
ret = -1;
break;
}
@@ -905,9 +914,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
tlv_pos += (sizeof(wildcard_ssid_tlv->header)
+ le16_to_cpu(wildcard_ssid_tlv->header.len));
- dev_dbg(adapter->dev, "info: scan: ssid[%d]: %s, %d\n",
- i, wildcard_ssid_tlv->ssid,
- wildcard_ssid_tlv->max_ssid_length);
+ mwifiex_dbg(adapter, INFO,
+ "info: scan: ssid[%d]: %s, %d\n",
+ i, wildcard_ssid_tlv->ssid,
+ wildcard_ssid_tlv->max_ssid_length);
/* Empty wildcard ssid with a maxlen will match many or
potentially all SSIDs (maxlen == 32), therefore do
@@ -928,8 +938,9 @@ mwifiex_config_scan(struct mwifiex_private *priv,
*filtered_scan = true;
if (user_scan_in->scan_chan_gap) {
- dev_dbg(adapter->dev, "info: scan: channel gap = %d\n",
- user_scan_in->scan_chan_gap);
+ mwifiex_dbg(adapter, INFO,
+ "info: scan: channel gap = %d\n",
+ user_scan_in->scan_chan_gap);
*max_chan_per_scan =
MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
@@ -961,8 +972,9 @@ mwifiex_config_scan(struct mwifiex_private *priv,
add tlv */
if (num_probes) {
- dev_dbg(adapter->dev, "info: scan: num_probes = %d\n",
- num_probes);
+ mwifiex_dbg(adapter, INFO,
+ "info: scan: num_probes = %d\n",
+ num_probes);
num_probes_tlv = (struct mwifiex_ie_types_num_probes *) tlv_pos;
num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
@@ -1003,7 +1015,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
if (user_scan_in && user_scan_in->chan_list[0].chan_number) {
- dev_dbg(adapter->dev, "info: Scan: Using supplied channel list\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Scan: Using supplied channel list\n");
for (chan_idx = 0;
chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX &&
@@ -1056,13 +1069,13 @@ mwifiex_config_scan(struct mwifiex_private *priv,
(user_scan_in->chan_list[0].chan_number ==
priv->curr_bss_params.bss_descriptor.channel)) {
*scan_current_only = true;
- dev_dbg(adapter->dev,
- "info: Scan: Scanning current channel only\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Scan: Scanning current channel only\n");
}
chan_num = chan_idx;
} else {
- dev_dbg(adapter->dev,
- "info: Scan: Creating full region channel list\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Scan: Creating full region channel list\n");
chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in,
scan_chan_list,
*filtered_scan);
@@ -1094,8 +1107,9 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
tlv_buf_left = tlv_buf_size;
*tlv_data = NULL;
- dev_dbg(adapter->dev, "info: SCAN_RESP: tlv_buf_size = %d\n",
- tlv_buf_size);
+ mwifiex_dbg(adapter, INFO,
+ "info: SCAN_RESP: tlv_buf_size = %d\n",
+ tlv_buf_size);
while (tlv_buf_left >= sizeof(struct mwifiex_ie_types_header)) {
@@ -1103,26 +1117,31 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
tlv_len = le16_to_cpu(current_tlv->header.len);
if (sizeof(tlv->header) + tlv_len > tlv_buf_left) {
- dev_err(adapter->dev, "SCAN_RESP: TLV buffer corrupt\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SCAN_RESP: TLV buffer corrupt\n");
break;
}
if (req_tlv_type == tlv_type) {
switch (tlv_type) {
case TLV_TYPE_TSFTIMESTAMP:
- dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
- "timestamp TLV, len = %d\n", tlv_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: SCAN_RESP: TSF\t"
+ "timestamp TLV, len = %d\n",
+ tlv_len);
*tlv_data = current_tlv;
break;
case TLV_TYPE_CHANNELBANDLIST:
- dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
- " band list TLV, len = %d\n", tlv_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: SCAN_RESP: channel\t"
+ "band list TLV, len = %d\n",
+ tlv_len);
*tlv_data = current_tlv;
break;
default:
- dev_err(adapter->dev,
- "SCAN_RESP: unhandled TLV = %d\n",
- tlv_type);
+ mwifiex_dbg(adapter, ERROR,
+ "SCAN_RESP: unhandled TLV = %d\n",
+ tlv_type);
/* Give up, this seems corrupted */
return;
}
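The surrounding function is a standard TLV walk: each element is a little-endian 16-bit type and 16-bit length followed by len payload bytes, and the remaining byte count must be re-checked before every dereference, exactly as the corruption check above does. A self-contained sketch of the pattern with illustrative names:

	while (left >= sizeof(struct mwifiex_ie_types_header)) {
		u16 len = le16_to_cpu(tlv->header.len);

		if (sizeof(tlv->header) + len > left)
			break;			/* truncated or corrupt */
		switch (le16_to_cpu(tlv->header.type)) {
		default:			/* dispatch per type here */
			break;
		}
		left -= sizeof(tlv->header) + len;
		tlv = (void *)((u8 *)tlv + sizeof(tlv->header) + len);
	}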
@@ -1177,8 +1196,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
total_ie_len = element_len + sizeof(struct ieee_types_header);
if (bytes_left < total_ie_len) {
- dev_err(adapter->dev, "err: InterpretIE: in processing"
- " IE, bytes left < IE length\n");
+ mwifiex_dbg(adapter, ERROR,
+ "err: InterpretIE: in processing\t"
+ "IE, bytes left < IE length\n");
return -1;
}
switch (element_id) {
@@ -1186,9 +1206,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->ssid.ssid_len = element_len;
memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
element_len);
- dev_dbg(adapter->dev,
- "info: InterpretIE: ssid: %-32s\n",
- bss_entry->ssid.ssid);
+ mwifiex_dbg(adapter, INFO,
+ "info: InterpretIE: ssid: %-32s\n",
+ bss_entry->ssid.ssid);
break;
case WLAN_EID_SUPP_RATES:
@@ -1419,19 +1439,20 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
unsigned long flags;
if (adapter->scan_processing) {
- dev_err(adapter->dev, "cmd: Scan already in process...\n");
+ mwifiex_dbg(adapter, WARN,
+ "cmd: Scan already in process...\n");
return -EBUSY;
}
if (priv->scan_block) {
- dev_err(adapter->dev,
- "cmd: Scan is blocked during association...\n");
+ mwifiex_dbg(adapter, WARN,
+ "cmd: Scan is blocked during association...\n");
return -EBUSY;
}
if (adapter->surprise_removed || adapter->is_cmd_timedout) {
- dev_err(adapter->dev,
- "Ignore scan. Card removed or firmware in bad state\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Ignore scan. Card removed or firmware in bad state\n");
return -EFAULT;
}
@@ -1478,7 +1499,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
/* Perform internal scan synchronously */
if (!priv->scan_request) {
- dev_dbg(adapter->dev, "wait internal scan\n");
+ mwifiex_dbg(adapter, INFO,
+ "wait internal scan\n");
mwifiex_wait_queue_complete(adapter, cmd_node);
}
} else {
@@ -1553,8 +1575,8 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
ret = mwifiex_is_network_compatible(priv, bss_desc,
priv->bss_mode);
if (ret)
- dev_err(priv->adapter->dev,
- "Incompatible network settings\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Incompatible network settings\n");
break;
default:
ret = 0;
@@ -1656,7 +1678,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
*/
if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) +
sizeof(struct mwifiex_fixed_bcn_param)) {
- dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
+ mwifiex_dbg(adapter, ERROR,
+ "InterpretIE: not enough bytes left\n");
return -EFAULT;
}
@@ -1669,7 +1692,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
rssi = (-rssi) * 100; /* Convert dBm to mBm */
current_ptr += sizeof(u8);
curr_bcn_bytes -= sizeof(u8);
- dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+ mwifiex_dbg(adapter, INFO,
+ "info: InterpretIE: RSSI=%d\n", rssi);
} else {
rssi = rssi_val;
}
@@ -1682,14 +1706,16 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
beacon_period = le16_to_cpu(bcn_param->beacon_period);
cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
- dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
- cap_info_bitmap);
+ mwifiex_dbg(adapter, INFO,
+ "info: InterpretIE: capabilities=0x%X\n",
+ cap_info_bitmap);
/* Rest of the current buffer are IE's */
ie_buf = current_ptr;
ie_len = curr_bcn_bytes;
- dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
- curr_bcn_bytes);
+ mwifiex_dbg(adapter, INFO,
+ "info: InterpretIE: IELength for this AP = %d\n",
+ curr_bcn_bytes);
while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
u8 element_id, element_len;
@@ -1698,8 +1724,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
element_len = *(current_ptr + 1);
if (curr_bcn_bytes < element_len +
sizeof(struct ieee_types_header)) {
- dev_err(adapter->dev,
- "%s: bytes left < IE length\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: bytes left < IE length\n", __func__);
return -EFAULT;
}
if (element_id == WLAN_EID_DS_PARAMS) {
@@ -1719,8 +1745,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
/* Skip entry if on csa closed channel */
if (channel == priv->csa_chan) {
- dev_dbg(adapter->dev,
- "Dropping entry on csa closed channel\n");
+ mwifiex_dbg(adapter, WARN,
+ "Dropping entry on csa closed channel\n");
return 0;
}
@@ -1751,7 +1777,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
cfg80211_put_bss(priv->wdev.wiphy, bss);
}
} else {
- dev_dbg(adapter->dev, "missing BSS channel IE\n");
+ mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n");
}
return 0;
@@ -1765,7 +1791,8 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv)
if (adapter->curr_cmd->wait_q_enabled) {
adapter->cmd_wait_q.status = 0;
if (!priv->scan_request) {
- dev_dbg(adapter->dev, "complete internal scan\n");
+ mwifiex_dbg(adapter, INFO,
+ "complete internal scan\n");
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
}
@@ -1788,12 +1815,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
mwifiex_complete_scan(priv);
if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: notifying scan done\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: notifying scan done\n");
cfg80211_scan_done(priv->scan_request, 0);
priv->scan_request = NULL;
} else {
priv->scan_aborting = false;
- dev_dbg(adapter->dev, "info: scan already aborted\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: scan already aborted\n");
}
} else if ((priv->scan_aborting && !priv->scan_request) ||
priv->scan_block) {
@@ -1809,12 +1838,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: aborting scan\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: aborting scan\n");
cfg80211_scan_done(priv->scan_request, 1);
priv->scan_request = NULL;
} else {
priv->scan_aborting = false;
- dev_dbg(adapter->dev, "info: scan already aborted\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: scan already aborted\n");
}
} else {
/* Get scan command from scan_pending_q and put to
@@ -1877,8 +1908,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
if (scan_rsp->number_of_sets > MWIFIEX_MAX_AP) {
- dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
- scan_rsp->number_of_sets);
+ mwifiex_dbg(adapter, ERROR,
+ "SCAN_RESP: too many AP returned (%d)\n",
+ scan_rsp->number_of_sets);
ret = -1;
goto check_next_scan;
}
@@ -1887,14 +1919,15 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
mwifiex_11h_get_csa_closed_channel(priv);
bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
- dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
- bytes_left);
+ mwifiex_dbg(adapter, INFO,
+ "info: SCAN_RESP: bss_descript_size %d\n",
+ bytes_left);
scan_resp_size = le16_to_cpu(resp->size);
- dev_dbg(adapter->dev,
- "info: SCAN_RESP: returned %d APs before parsing\n",
- scan_rsp->number_of_sets);
+ mwifiex_dbg(adapter, INFO,
+ "info: SCAN_RESP: returned %d APs before parsing\n",
+ scan_rsp->number_of_sets);
bss_info = scan_rsp->bss_desc_and_tlv_buffer;
@@ -2007,13 +2040,13 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv,
le16_to_cpu(fw_chan_stats->cca_scan_dur);
chan_stats.cca_busy_dur =
le16_to_cpu(fw_chan_stats->cca_busy_dur);
- dev_dbg(adapter->dev,
- "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n",
- chan_stats.chan_num,
- chan_stats.noise,
- chan_stats.total_bss,
- chan_stats.cca_scan_dur,
- chan_stats.cca_busy_dur);
+ mwifiex_dbg(adapter, INFO,
+ "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n",
+ chan_stats.chan_num,
+ chan_stats.noise,
+ chan_stats.total_bss,
+ chan_stats.cca_scan_dur,
+ chan_stats.cca_busy_dur);
memcpy(&adapter->chan_stats[adapter->survey_idx++], &chan_stats,
sizeof(struct mwifiex_chan_stats));
fw_chan_stats++;
@@ -2035,7 +2068,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
unsigned long cmd_flags, scan_flags;
bool complete_scan = false;
- dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+ mwifiex_dbg(adapter, INFO, "info: EXT scan returns successfully\n");
ext_scan_resp = &resp->params.ext_scan;
@@ -2048,8 +2081,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
len = le16_to_cpu(tlv->len);
if (buf_left < (sizeof(struct mwifiex_ie_types_header) + len)) {
- dev_err(adapter->dev,
- "error processing scan response TLVs");
+ mwifiex_dbg(adapter, ERROR,
+ "error processing scan response TLVs");
break;
}
@@ -2075,8 +2108,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
cmd_ptr = (void *)cmd_node->cmd_skb->data;
if (le16_to_cpu(cmd_ptr->command) ==
HostCmd_CMD_802_11_SCAN_EXT) {
- dev_dbg(priv->adapter->dev,
- "Scan pending in command pending list");
+ mwifiex_dbg(adapter, INFO,
+ "Scan pending in command pending list");
complete_scan = false;
break;
}
@@ -2114,17 +2147,20 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
u16 scan_resp_size = le16_to_cpu(event_scan->buf_size);
if (num_of_set > MWIFIEX_MAX_AP) {
- dev_err(adapter->dev,
- "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
- num_of_set);
+ mwifiex_dbg(adapter, ERROR,
+ "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
+ num_of_set);
ret = -1;
goto check_next_scan;
}
bytes_left = scan_resp_size;
- dev_dbg(adapter->dev,
- "EXT_SCAN: size %d, returned %d APs...",
- scan_resp_size, num_of_set);
+ mwifiex_dbg(adapter, INFO,
+ "EXT_SCAN: size %d, returned %d APs...",
+ scan_resp_size, num_of_set);
+ mwifiex_dbg_dump(adapter, CMD_D, "EXT_SCAN buffer:", buf,
+ scan_resp_size +
+ sizeof(struct mwifiex_event_scan_result));
tlv = (struct mwifiex_ie_types_data *)scan_resp;
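mwifiex_dbg_dump() used above is also new in this series and not shown here; presumably it gates a hex dump of the buffer on its own debug-mask bit (CMD_D), along the lines of the following sketch. The macro body is an assumption; print_hex_dump() is the standard kernel helper:

#define mwifiex_dbg_dump(adapter, mask, str, buf, len)			\
	do {								\
		if ((adapter)->debug_mask & MWIFIEX_DBG_##mask)		\
			print_hex_dump(KERN_DEBUG, str,			\
				       DUMP_PREFIX_OFFSET, 16, 1,	\
				       buf, len, false);		\
	} while (0)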
@@ -2132,7 +2168,8 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
type = le16_to_cpu(tlv->header.type);
len = le16_to_cpu(tlv->header.len);
if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) {
- dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n");
+ mwifiex_dbg(adapter, ERROR,
+ "EXT_SCAN: Error bytes left < TLV length\n");
break;
}
scan_rsp_tlv = NULL;
@@ -2158,8 +2195,9 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
len = le16_to_cpu(tlv->header.len);
if (bytes_left_for_tlv <
sizeof(struct mwifiex_ie_types_header) + len) {
- dev_err(adapter->dev,
- "EXT_SCAN: Error in processing TLV, bytes left < TLV length\n");
+ mwifiex_dbg(adapter, ERROR,
+ "EXT_SCAN: Error in processing TLV,\t"
+ "bytes left < TLV length\n");
scan_rsp_tlv = NULL;
bytes_left_for_tlv = 0;
continue;
@@ -2199,8 +2237,8 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
if (scan_info_tlv) {
rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi));
rssi *= 100; /* Convert dBm to mBm */
- dev_dbg(adapter->dev,
- "info: InterpretIE: RSSI=%d\n", rssi);
+ mwifiex_dbg(adapter, INFO,
+ "info: InterpretIE: RSSI=%d\n", rssi);
fw_tsf = le64_to_cpu(scan_info_tlv->tsf);
radio_type = &scan_info_tlv->radio_type;
} else {
@@ -2271,13 +2309,14 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
struct mwifiex_user_scan_cfg *scan_cfg;
if (adapter->scan_processing) {
- dev_err(adapter->dev, "cmd: Scan already in process...\n");
+ mwifiex_dbg(adapter, WARN,
+ "cmd: Scan already in process...\n");
return -EBUSY;
}
if (priv->scan_block) {
- dev_err(adapter->dev,
- "cmd: Scan is blocked during association...\n");
+ mwifiex_dbg(adapter, WARN,
+ "cmd: Scan is blocked during association...\n");
return -EBUSY;
}
@@ -2309,8 +2348,9 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
int ret;
if (down_interruptible(&priv->async_sem)) {
- dev_err(priv->adapter->dev, "%s: acquire semaphore\n",
- __func__);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: acquire semaphore fail\n",
+ __func__);
return -1;
}
@@ -2400,8 +2440,9 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf,
curr_bss->beacon_buf_size);
- dev_dbg(priv->adapter->dev, "info: current beacon saved %d\n",
- priv->curr_bcn_size);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: current beacon saved %d\n",
+ priv->curr_bcn_size);
curr_bss->beacon_buf = priv->curr_bcn_buf;
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d10320f89bc1..a0b121f3460c 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -166,7 +166,8 @@ static int mwifiex_sdio_resume(struct device *dev)
adapter = card->adapter;
if (!adapter->is_suspended) {
- dev_warn(adapter->dev, "device already resumed\n");
+ mwifiex_dbg(adapter, WARN,
+ "device already resumed\n");
return 0;
}
@@ -191,8 +192,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
struct mwifiex_adapter *adapter;
struct mwifiex_private *priv;
- pr_debug("info: SDIO func num=%d\n", func->num);
-
card = sdio_get_drvdata(func);
if (!card)
return;
@@ -201,6 +200,8 @@ mwifiex_sdio_remove(struct sdio_func *func)
if (!adapter || !adapter->priv_num)
return;
+ mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
+
if (user_rmmod) {
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);
@@ -257,12 +258,14 @@ static int mwifiex_sdio_suspend(struct device *dev)
/* Enable the Host Sleep */
if (!mwifiex_enable_hs(adapter)) {
- dev_err(adapter->dev, "cmd: failed to suspend\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cmd: failed to suspend\n");
adapter->hs_enabling = false;
return -EFAULT;
}
- dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
+ mwifiex_dbg(adapter, INFO,
+ "cmd: suspend with MMC_PM_KEEP_POWER\n");
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
/* Indicate device suspended */
@@ -386,8 +389,8 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
if (adapter->is_suspended) {
- dev_err(adapter->dev,
- "%s: not allowed while suspended\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: not allowed while suspended\n", __func__);
return -1;
}
@@ -434,7 +437,8 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
*/
static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
- dev_dbg(adapter->dev, "event: wakeup device...\n");
+ mwifiex_dbg(adapter, EVENT,
+ "event: wakeup device...\n");
return mwifiex_write_reg(adapter, CONFIGURATION_REG, HOST_POWER_UP);
}
@@ -446,7 +450,8 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
*/
static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
{
- dev_dbg(adapter->dev, "cmd: wakeup device completed\n");
+ mwifiex_dbg(adapter, EVENT,
+ "cmd: wakeup device completed\n");
return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0);
}
@@ -524,7 +529,8 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
else
return -1;
cont:
- pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
+ mwifiex_dbg(adapter, INFO,
+ "info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
/* Set Host interrupt reset to read to clear */
if (!mwifiex_read_reg(adapter, card->reg->host_int_rsr_reg, &reg))
@@ -556,10 +562,12 @@ static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter,
ret = mwifiex_write_data_sync(adapter, payload, pkt_len, port);
if (ret) {
i++;
- dev_err(adapter->dev, "host_to_card, write iomem"
- " (%d) failed: %d\n", i, ret);
+ mwifiex_dbg(adapter, ERROR,
+ "host_to_card, write iomem\t"
+ "(%d) failed: %d\n", i, ret);
if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
- dev_err(adapter->dev, "write CFG reg failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "write CFG reg failed\n");
ret = -1;
if (i > MAX_WRITE_IOMEM_RETRY)
@@ -584,7 +592,8 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
const struct mwifiex_sdio_card_reg *reg = card->reg;
u32 rd_bitmap = card->mp_rd_bitmap;
- dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
+ mwifiex_dbg(adapter, DATA,
+ "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
if (card->supports_sdio_new_mode) {
if (!(rd_bitmap & reg->data_port_mask))
@@ -598,8 +607,9 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
(card->mp_rd_bitmap & CTRL_PORT_MASK)) {
card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK);
*port = CTRL_PORT;
- dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n",
- *port, card->mp_rd_bitmap);
+ mwifiex_dbg(adapter, DATA,
+ "data: port=%d mp_rd_bitmap=0x%08x\n",
+ *port, card->mp_rd_bitmap);
return 0;
}
@@ -613,9 +623,9 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
if (++card->curr_rd_port == card->max_ports)
card->curr_rd_port = reg->start_rd_port;
- dev_dbg(adapter->dev,
- "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
- *port, rd_bitmap, card->mp_rd_bitmap);
+ mwifiex_dbg(adapter, DATA,
+ "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
+ *port, rd_bitmap, card->mp_rd_bitmap);
return 0;
}
@@ -633,7 +643,8 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port)
const struct mwifiex_sdio_card_reg *reg = card->reg;
u32 wr_bitmap = card->mp_wr_bitmap;
- dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
+ mwifiex_dbg(adapter, DATA,
+ "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
if (!(wr_bitmap & card->mp_data_port_mask)) {
adapter->data_sent = true;
@@ -651,15 +662,16 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port)
}
if ((card->has_control_mask) && (*port == CTRL_PORT)) {
- dev_err(adapter->dev,
- "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
- *port, card->curr_wr_port, wr_bitmap,
- card->mp_wr_bitmap);
+ mwifiex_dbg(adapter, ERROR,
+ "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
+ *port, card->curr_wr_port, wr_bitmap,
+ card->mp_wr_bitmap);
return -1;
}
- dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
- *port, wr_bitmap, card->mp_wr_bitmap);
+ mwifiex_dbg(adapter, DATA,
+ "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
+ *port, wr_bitmap, card->mp_wr_bitmap);
return 0;
}
@@ -683,7 +695,8 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
usleep_range(10, 20);
}
- dev_err(adapter->dev, "poll card status failed, tries = %d\n", tries);
+ mwifiex_dbg(adapter, ERROR,
+ "poll card status failed, tries = %d\n", tries);
return -1;
}
@@ -738,7 +751,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
if (mwifiex_read_data_sync(adapter, card->mp_regs,
card->reg->max_mp_regs,
REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
- dev_err(adapter->dev, "read mp_regs failed\n");
+ mwifiex_dbg(adapter, ERROR, "read mp_regs failed\n");
return;
}
@@ -751,7 +764,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
* UP_LD_CMD_PORT_HOST_INT_STATUS
* Clear the interrupt status register
*/
- dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
+ mwifiex_dbg(adapter, INTR,
+ "int: sdio_ireg = %#x\n", sdio_ireg);
spin_lock_irqsave(&adapter->int_lock, flags);
adapter->int_status |= sdio_ireg;
spin_unlock_irqrestore(&adapter->int_lock, flags);
@@ -802,7 +816,8 @@ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
/* Request the SDIO IRQ */
ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
if (ret) {
- dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "claim irq failed: ret=%d\n", ret);
goto out;
}
@@ -810,7 +825,8 @@ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
ret = mwifiex_write_reg_locked(func, card->reg->host_int_mask_reg,
card->reg->host_int_enable);
if (ret) {
- dev_err(adapter->dev, "enable host interrupt failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "enable host interrupt failed\n");
sdio_release_irq(func);
}
@@ -830,22 +846,25 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
u32 nb;
if (!buffer) {
- dev_err(adapter->dev, "%s: buffer is NULL\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: buffer is NULL\n", __func__);
return -1;
}
ret = mwifiex_read_data_sync(adapter, buffer, npayload, ioport, 1);
if (ret) {
- dev_err(adapter->dev, "%s: read iomem failed: %d\n", __func__,
+ mwifiex_dbg(adapter, ERROR,
+ "%s: read iomem failed: %d\n", __func__,
ret);
return -1;
}
nb = le16_to_cpu(*(__le16 *) (buffer));
if (nb > npayload) {
- dev_err(adapter->dev, "%s: invalid packet, nb=%d npayload=%d\n",
- __func__, nb, npayload);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: invalid packet, nb=%d npayload=%d\n",
+ __func__, nb, npayload);
return -1;
}
@@ -877,13 +896,14 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
u32 i = 0;
if (!firmware_len) {
- dev_err(adapter->dev,
- "firmware image not found! Terminating download\n");
+ mwifiex_dbg(adapter, ERROR,
+ "firmware image not found! Terminating download\n");
return -1;
}
- dev_dbg(adapter->dev, "info: downloading FW image (%d bytes)\n",
- firmware_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: downloading FW image (%d bytes)\n",
+ firmware_len);
/* Assume that the allocated buffer is 8-byte aligned */
fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL);
@@ -897,8 +917,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = mwifiex_sdio_poll_card_status(adapter, CARD_IO_READY |
DN_LD_CARD_RDY);
if (ret) {
- dev_err(adapter->dev, "FW download with helper:"
- " poll status timeout @ %d\n", offset);
+ mwifiex_dbg(adapter, ERROR,
+ "FW download with helper:\t"
+ "poll status timeout @ %d\n", offset);
goto done;
}
@@ -910,19 +931,19 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
ret = mwifiex_read_reg(adapter, reg->base_0_reg,
&base0);
if (ret) {
- dev_err(adapter->dev,
- "dev BASE0 register read failed: "
- "base0=%#04X(%d). Terminating dnld\n",
- base0, base0);
+ mwifiex_dbg(adapter, ERROR,
+ "dev BASE0 register read failed:\t"
+ "base0=%#04X(%d). Terminating dnld\n",
+ base0, base0);
goto done;
}
ret = mwifiex_read_reg(adapter, reg->base_1_reg,
&base1);
if (ret) {
- dev_err(adapter->dev,
- "dev BASE1 register read failed: "
- "base1=%#04X(%d). Terminating dnld\n",
- base1, base1);
+ mwifiex_dbg(adapter, ERROR,
+ "dev BASE1 register read failed:\t"
+ "base1=%#04X(%d). Terminating dnld\n",
+ base1, base1);
goto done;
}
len = (u16) (((base1 & 0xff) << 8) | (base0 & 0xff));
@@ -936,9 +957,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (!len) {
break;
} else if (len > MWIFIEX_UPLD_SIZE) {
- dev_err(adapter->dev,
- "FW dnld failed @ %d, invalid length %d\n",
- offset, len);
+ mwifiex_dbg(adapter, ERROR,
+ "FW dnld failed @ %d, invalid length %d\n",
+ offset, len);
ret = -1;
goto done;
}
@@ -948,14 +969,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (len & BIT(0)) {
i++;
if (i > MAX_WRITE_IOMEM_RETRY) {
- dev_err(adapter->dev,
- "FW dnld failed @ %d, over max retry\n",
- offset);
+ mwifiex_dbg(adapter, ERROR,
+ "FW dnld failed @ %d, over max retry\n",
+ offset);
ret = -1;
goto done;
}
- dev_err(adapter->dev, "CRC indicated by the helper:"
- " len = 0x%04X, txlen = %d\n", len, txlen);
+ mwifiex_dbg(adapter, ERROR,
+ "CRC indicated by the helper:\t"
+ "len = 0x%04X, txlen = %d\n", len, txlen);
len &= ~BIT(0);
/* Setting this to 0 to resend from same offset */
txlen = 0;
@@ -978,11 +1000,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
MWIFIEX_SDIO_BLOCK_SIZE,
adapter->ioport);
if (ret) {
- dev_err(adapter->dev,
- "FW download, write iomem (%d) failed @ %d\n",
- i, offset);
+ mwifiex_dbg(adapter, ERROR,
+ "FW download, write iomem (%d) failed @ %d\n",
+ i, offset);
if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
- dev_err(adapter->dev, "write CFG reg failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "write CFG reg failed\n");
ret = -1;
goto done;
@@ -991,8 +1014,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
offset += txlen;
} while (true);
- dev_notice(adapter->dev,
- "info: FW download over, size %d bytes\n", offset);
+ mwifiex_dbg(adapter, MSG,
+ "info: FW download over, size %d bytes\n", offset);
ret = 0;
done:
@@ -1066,18 +1089,20 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
blk_num = *(data + BLOCK_NUMBER_OFFSET);
blk_size = adapter->sdio_rx_block_size * blk_num;
if (blk_size > total_pkt_len) {
- dev_err(adapter->dev, "%s: error in pkt,\t"
- "blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
- __func__, blk_num, blk_size, total_pkt_len);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: error in blk_size,\t"
+ "blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
+ __func__, blk_num, blk_size, total_pkt_len);
break;
}
pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET +
2));
if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
- dev_err(adapter->dev, "%s: error in pkt,\t"
- "pkt_len=%d, blk_size=%d\n",
- __func__, pkt_len, blk_size);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: error in pkt_len,\t"
+ "pkt_len=%d, blk_size=%d\n",
+ __func__, pkt_len, blk_size);
break;
}
skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
@@ -1116,7 +1141,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
switch (upld_typ) {
case MWIFIEX_TYPE_AGGR_DATA:
- dev_dbg(adapter->dev, "info: --- Rx: Aggr Data packet ---\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: --- Rx: Aggr Data packet ---\n");
rx_info = MWIFIEX_SKB_RXCB(skb);
rx_info->buf_type = MWIFIEX_TYPE_AGGR_DATA;
if (adapter->rx_work_enabled) {
@@ -1130,7 +1156,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
break;
case MWIFIEX_TYPE_DATA:
- dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n");
+ mwifiex_dbg(adapter, DATA,
+ "info: --- Rx: Data packet ---\n");
if (adapter->rx_work_enabled) {
skb_queue_tail(&adapter->rx_data_q, skb);
adapter->data_received = true;
@@ -1141,7 +1168,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
break;
case MWIFIEX_TYPE_CMD:
- dev_dbg(adapter->dev, "info: --- Rx: Cmd Response ---\n");
+ mwifiex_dbg(adapter, CMD,
+ "info: --- Rx: Cmd Response ---\n");
/* take care of curr_cmd = NULL case */
if (!adapter->curr_cmd) {
cmd_buf = adapter->upld_buf;
@@ -1163,7 +1191,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
break;
case MWIFIEX_TYPE_EVENT:
- dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
+ mwifiex_dbg(adapter, EVENT,
+ "info: --- Rx: Event ---\n");
adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data);
if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
@@ -1178,7 +1207,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
break;
default:
- dev_err(adapter->dev, "unknown upload type %#x\n", upld_typ);
+ mwifiex_dbg(adapter, ERROR,
+ "unknown upload type %#x\n", upld_typ);
dev_kfree_skb_any(skb);
break;
}
@@ -1210,16 +1240,18 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
if ((card->has_control_mask) && (port == CTRL_PORT)) {
/* Read the command Resp without aggr */
- dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
- "response\n", __func__);
+ mwifiex_dbg(adapter, CMD,
+ "info: %s: no aggregation for cmd\t"
+ "response\n", __func__);
f_do_rx_cur = 1;
goto rx_curr_single;
}
if (!card->mpa_rx.enabled) {
- dev_dbg(adapter->dev, "info: %s: rx aggregation disabled\n",
- __func__);
+ mwifiex_dbg(adapter, WARN,
+ "info: %s: rx aggregation disabled\n",
+ __func__);
f_do_rx_cur = 1;
goto rx_curr_single;
@@ -1230,7 +1262,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
(card->has_control_mask && (card->mp_rd_bitmap &
(~((u32) CTRL_PORT_MASK))))) {
/* Some more data RX pending */
- dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: not last packet\n", __func__);
if (MP_RX_AGGR_IN_PROGRESS(card)) {
if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) {
@@ -1247,7 +1280,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
} else {
/* No more data RX pending */
- dev_dbg(adapter->dev, "info: %s: last packet\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: last packet\n", __func__);
if (MP_RX_AGGR_IN_PROGRESS(card)) {
f_do_rx_aggr = 1;
@@ -1262,14 +1296,16 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
}
if (f_aggr_cur) {
- dev_dbg(adapter->dev, "info: current packet aggregation\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: current packet aggregation\n");
/* Curr pkt can be aggregated */
mp_rx_aggr_setup(card, rx_len, port);
if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
mp_rx_aggr_port_limit_reached(card)) {
- dev_dbg(adapter->dev, "info: %s: aggregated packet "
- "limit reached\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: aggregated packet\t"
+ "limit reached\n", __func__);
/* No more pkts allowed in Aggr buf, rx it */
f_do_rx_aggr = 1;
}
@@ -1277,8 +1313,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
if (f_do_rx_aggr) {
/* do aggr RX now */
- dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
- card->mpa_rx.pkt_cnt);
+ mwifiex_dbg(adapter, DATA,
+ "info: do_rx_aggr: num of packets: %d\n",
+ card->mpa_rx.pkt_cnt);
if (card->supports_sdio_new_mode) {
int i;
@@ -1318,8 +1355,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
GFP_KERNEL |
GFP_DMA);
if (!skb_deaggr) {
- dev_err(adapter->dev, "skb allocation failure drop pkt len=%d type=%d\n",
- pkt_len, pkt_type);
+ mwifiex_dbg(adapter, ERROR, "skb allocation failure\t"
+ "drop pkt len=%d type=%d\n",
+ pkt_len, pkt_type);
curr_ptr += len_arr[pind];
continue;
}
@@ -1339,12 +1377,12 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
mwifiex_decode_rx_packet(adapter, skb_deaggr,
pkt_type);
} else {
- dev_err(adapter->dev, " drop wrong aggr pkt:\t"
- "sdio_single_port_rx_aggr=%d\t"
- "type=%d len=%d max_len=%d\n",
- adapter->sdio_rx_aggr_enable,
- pkt_type, pkt_len,
- len_arr[pind]);
+ mwifiex_dbg(adapter, ERROR,
+ "drop wrong aggr pkt:\t"
+ "sdio_single_port_rx_aggr=%d\t"
+ "type=%d len=%d max_len=%d\n",
+ adapter->sdio_rx_aggr_enable,
+ pkt_type, pkt_len, len_arr[pind]);
dev_kfree_skb_any(skb_deaggr);
}
curr_ptr += len_arr[pind];
@@ -1354,13 +1392,14 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
rx_curr_single:
if (f_do_rx_cur) {
- dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
- port, rx_len);
+ mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n",
+ port, rx_len);
skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
if (!skb) {
- dev_err(adapter->dev, "single skb allocated fail,\t"
- "drop pkt port=%d len=%d\n", port, rx_len);
+ mwifiex_dbg(adapter, ERROR,
+ "single skb allocated fail,\t"
+ "drop pkt port=%d len=%d\n", port, rx_len);
if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
card->mpa_rx.buf, rx_len,
adapter->ioport + port))
@@ -1376,9 +1415,9 @@ rx_curr_single:
goto error;
if (!adapter->sdio_rx_aggr_enable &&
pkt_type == MWIFIEX_TYPE_AGGR_DATA) {
- dev_err(adapter->dev, "drop wrong pkt type %d\t"
- "current SDIO RX Aggr not enabled\n",
- pkt_type);
+ mwifiex_dbg(adapter, ERROR, "drop wrong pkt type %d\t"
+ "current SDIO RX Aggr not enabled\n",
+ pkt_type);
dev_kfree_skb_any(skb);
return 0;
}
@@ -1386,7 +1425,8 @@ rx_curr_single:
mwifiex_decode_rx_packet(adapter, skb, pkt_type);
}
if (f_post_aggr_cur) {
- dev_dbg(adapter->dev, "info: current packet aggregation\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: current packet aggregation\n");
/* Curr pkt can be aggregated */
mp_rx_aggr_setup(card, rx_len, port);
}
@@ -1458,7 +1498,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
MWIFIEX_RX_DATA_BUF_SIZE)
return -1;
rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
- dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
+ mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len);
skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
if (!skb)
@@ -1469,17 +1509,17 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data,
skb->len, adapter->ioport |
CMD_PORT_SLCT)) {
- dev_err(adapter->dev,
- "%s: failed to card_to_host", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to card_to_host", __func__);
dev_kfree_skb_any(skb);
goto term_cmd;
}
if ((pkt_type != MWIFIEX_TYPE_CMD) &&
(pkt_type != MWIFIEX_TYPE_EVENT))
- dev_err(adapter->dev,
- "%s:Received wrong packet on cmd port",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s:Received wrong packet on cmd port",
+ __func__);
mwifiex_decode_rx_packet(adapter, skb, pkt_type);
}
@@ -1495,12 +1535,13 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
}
card->mp_wr_bitmap = bitmap;
- dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n",
- card->mp_wr_bitmap);
+ mwifiex_dbg(adapter, INTR,
+ "int: DNLD: wr_bitmap=0x%x\n",
+ card->mp_wr_bitmap);
if (adapter->data_sent &&
(card->mp_wr_bitmap & card->mp_data_port_mask)) {
- dev_dbg(adapter->dev,
- "info: <--- Tx DONE Interrupt --->\n");
+ mwifiex_dbg(adapter, INTR,
+ "info: <--- Tx DONE Interrupt --->\n");
adapter->data_sent = false;
}
}
@@ -1517,8 +1558,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
adapter->cmd_sent = false;
}
- dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
- adapter->cmd_sent, adapter->data_sent);
+ mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n",
+ adapter->cmd_sent, adapter->data_sent);
if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
bitmap = (u32) card->mp_regs[reg->rd_bitmap_l];
bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8;
@@ -1529,40 +1570,45 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24;
}
card->mp_rd_bitmap = bitmap;
- dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n",
- card->mp_rd_bitmap);
+ mwifiex_dbg(adapter, INTR,
+ "int: UPLD: rd_bitmap=0x%x\n",
+ card->mp_rd_bitmap);
while (true) {
ret = mwifiex_get_rd_port(adapter, &port);
if (ret) {
- dev_dbg(adapter->dev,
- "info: no more rd_port available\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: no more rd_port available\n");
break;
}
len_reg_l = reg->rd_len_p0_l + (port << 1);
len_reg_u = reg->rd_len_p0_u + (port << 1);
rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
rx_len |= (u16) card->mp_regs[len_reg_l];
- dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
- port, rx_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: RX: port=%d rx_len=%u\n",
+ port, rx_len);
rx_blocks =
(rx_len + MWIFIEX_SDIO_BLOCK_SIZE -
1) / MWIFIEX_SDIO_BLOCK_SIZE;
if (rx_len <= INTF_HEADER_LEN ||
(rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
card->mpa_rx.buf_size) {
- dev_err(adapter->dev, "invalid rx_len=%d\n",
- rx_len);
+ mwifiex_dbg(adapter, ERROR,
+ "invalid rx_len=%d\n",
+ rx_len);
return -1;
}
rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
- dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
+ mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n",
+ rx_len);
if (mwifiex_sdio_card_to_host_mp_aggr(adapter, rx_len,
port)) {
- dev_err(adapter->dev, "card_to_host_mpa failed:"
- " int status=%#x\n", sdio_ireg);
+ mwifiex_dbg(adapter, ERROR,
+ "card_to_host_mpa failed: int status=%#x\n",
+ sdio_ireg);
goto term_cmd;
}
}
@@ -1573,19 +1619,23 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
term_cmd:
/* terminate cmd */
if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
- dev_err(adapter->dev, "read CFG reg failed\n");
+ mwifiex_dbg(adapter, ERROR, "read CFG reg failed\n");
else
- dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr);
+ mwifiex_dbg(adapter, INFO,
+ "info: CFG reg val = %d\n", cr);
if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04)))
- dev_err(adapter->dev, "write CFG reg failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "write CFG reg failed\n");
else
- dev_dbg(adapter->dev, "info: write success\n");
+ mwifiex_dbg(adapter, INFO, "info: write success\n");
if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
- dev_err(adapter->dev, "read CFG reg failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "read CFG reg failed\n");
else
- dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr);
+ mwifiex_dbg(adapter, INFO,
+ "info: CFG reg val =%x\n", cr);
return -1;
}
@@ -1619,8 +1669,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
if (!card->mpa_tx.enabled ||
(card->has_control_mask && (port == CTRL_PORT)) ||
(card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) {
- dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n",
- __func__);
+ mwifiex_dbg(adapter, WARN,
+ "info: %s: tx aggregation disabled\n",
+ __func__);
f_send_cur_buf = 1;
goto tx_curr_single;
@@ -1628,8 +1679,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
if (next_pkt_len) {
/* More pkt in TX queue */
- dev_dbg(adapter->dev, "info: %s: more packets in queue.\n",
- __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: more packets in queue.\n",
+ __func__);
if (MP_TX_AGGR_IN_PROGRESS(card)) {
if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) {
@@ -1659,8 +1711,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
}
} else {
/* Last pkt in TX queue */
- dev_dbg(adapter->dev, "info: %s: Last packet in Tx Queue.\n",
- __func__);
+ mwifiex_dbg(adapter, INFO,
+ "info: %s: Last packet in Tx Queue.\n",
+ __func__);
if (MP_TX_AGGR_IN_PROGRESS(card)) {
/* some packs in Aggr buf already */
@@ -1677,8 +1730,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
}
if (f_precopy_cur_buf) {
- dev_dbg(adapter->dev, "data: %s: precopy current buffer\n",
- __func__);
+ mwifiex_dbg(adapter, DATA,
+ "data: %s: precopy current buffer\n",
+ __func__);
MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) ||
@@ -1688,9 +1742,10 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
}
if (f_send_aggr_buf) {
- dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n",
- __func__,
- card->mpa_tx.start_port, card->mpa_tx.ports);
+ mwifiex_dbg(adapter, DATA,
+ "data: %s: send aggr buffer: %d %d\n",
+ __func__, card->mpa_tx.start_port,
+ card->mpa_tx.ports);
if (card->supports_sdio_new_mode) {
u32 port_count;
int i;
@@ -1719,15 +1774,17 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
tx_curr_single:
if (f_send_cur_buf) {
- dev_dbg(adapter->dev, "data: %s: send current buffer %d\n",
- __func__, port);
+ mwifiex_dbg(adapter, DATA,
+ "data: %s: send current buffer %d\n",
+ __func__, port);
ret = mwifiex_write_data_to_card(adapter, payload, pkt_len,
adapter->ioport + port);
}
if (f_postcopy_cur_buf) {
- dev_dbg(adapter->dev, "data: %s: postcopy current buffer\n",
- __func__);
+ mwifiex_dbg(adapter, DATA,
+ "data: %s: postcopy current buffer\n",
+ __func__);
MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
}
@@ -1771,8 +1828,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
if (type == MWIFIEX_TYPE_DATA) {
ret = mwifiex_get_wr_port_data(adapter, &port);
if (ret) {
- dev_err(adapter->dev, "%s: no wr_port available\n",
- __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: no wr_port available\n",
+ __func__);
return ret;
}
} else {
@@ -1781,8 +1839,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
if (pkt_len <= INTF_HEADER_LEN ||
pkt_len > MWIFIEX_UPLD_SIZE)
- dev_err(adapter->dev, "%s: payload=%p, nb=%d\n",
- __func__, payload, pkt_len);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: payload=%p, nb=%d\n",
+ __func__, payload, pkt_len);
if (card->supports_sdio_new_mode)
port = CMD_PORT_SLCT;
@@ -1896,7 +1955,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
sdio_release_host(func);
if (ret) {
- pr_err("cannot set SDIO block size\n");
+ mwifiex_dbg(adapter, ERROR,
+ "cannot set SDIO block size\n");
return ret;
}
@@ -1977,7 +2037,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
card->mp_tx_agg_buf_size,
card->mp_rx_agg_buf_size);
if (ret) {
- dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
+ mwifiex_dbg(adapter, ERROR,
+ "failed to alloc sdio mp-a buffers\n");
kfree(card->mp_regs);
return -1;
}
@@ -2041,8 +2102,9 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
card->curr_wr_port = reg->start_wr_port;
- dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n",
- port, card->mp_data_port_mask);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: mp_end_port %d, data port mask 0x%x\n",
+ port, card->mp_data_port_mask);
}
static struct mwifiex_adapter *save_adapter;
@@ -2059,7 +2121,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
* We run it in a totally independent workqueue.
*/
- pr_err("Resetting card...\n");
+ mwifiex_dbg(adapter, WARN, "Resetting card...\n");
mmc_remove_host(target);
/* 200ms delay is based on experiment with sdhci controller */
mdelay(200);
@@ -2079,14 +2141,14 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
&ret);
if (ret) {
- dev_err(adapter->dev, "SDIO Write ERR\n");
+ mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n");
return RDWR_STATUS_FAILURE;
}
for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl,
&ret);
if (ret) {
- dev_err(adapter->dev, "SDIO read err\n");
+ mwifiex_dbg(adapter, ERROR, "SDIO read err\n");
return RDWR_STATUS_FAILURE;
}
if (ctrl_data == FW_DUMP_DONE)
@@ -2094,19 +2156,20 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
if (doneflag && ctrl_data == doneflag)
return RDWR_STATUS_DONE;
if (ctrl_data != FW_DUMP_HOST_READY) {
- dev_info(adapter->dev,
- "The ctrl reg was changed, re-try again!\n");
+ mwifiex_dbg(adapter, WARN,
+ "The ctrl reg was changed, re-try again!\n");
sdio_writeb(card->func, FW_DUMP_HOST_READY,
card->reg->fw_dump_ctrl, &ret);
if (ret) {
- dev_err(adapter->dev, "SDIO write err\n");
+ mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
return RDWR_STATUS_FAILURE;
}
}
usleep_range(100, 200);
}
if (ctrl_data == FW_DUMP_HOST_READY) {
- dev_err(adapter->dev, "Fail to pull ctrl_data\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Fail to pull ctrl_data\n");
return RDWR_STATUS_FAILURE;
}
@@ -2114,7 +2177,7 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
}
/* This function dump firmware memory to file */
-static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
int ret = 0;
@@ -2122,9 +2185,6 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
enum rdwr_status stat;
u32 memory_size;
- static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL };
-
- mwifiex_dump_drv_info(adapter);
if (!card->can_dump_fw)
return;
@@ -2142,7 +2202,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
mwifiex_pm_wakeup_card(adapter);
sdio_claim_host(card->func);
- dev_info(adapter->dev, "== mwifiex firmware dump start ==\n");
+ mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
if (stat == RDWR_STATUS_FAILURE)
@@ -2152,7 +2212,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
/* Read the number of the memories which will dump */
dump_num = sdio_readb(card->func, reg, &ret);
if (ret) {
- dev_err(adapter->dev, "SDIO read memory length err\n");
+ mwifiex_dbg(adapter, ERROR, "SDIO read memory length err\n");
goto done;
}
@@ -2169,7 +2229,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
for (i = 0; i < 4; i++) {
read_reg = sdio_readb(card->func, reg, &ret);
if (ret) {
- dev_err(adapter->dev, "SDIO read err\n");
+ mwifiex_dbg(adapter, ERROR, "SDIO read err\n");
goto done;
}
memory_size |= (read_reg << i*8);
@@ -2177,25 +2237,33 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
}
if (memory_size == 0) {
- dev_info(adapter->dev, "Firmware dump Finished!\n");
+ mwifiex_dbg(adapter, DUMP, "Firmware dump Finished!\n");
+ ret = mwifiex_write_reg(adapter,
+ card->reg->fw_dump_ctrl,
+ FW_DUMP_READ_DONE);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
+ return;
+ }
break;
}
- dev_info(adapter->dev,
- "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
+ mwifiex_dbg(adapter, DUMP,
+ "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
entry->mem_ptr = vmalloc(memory_size + 1);
entry->mem_size = memory_size;
if (!entry->mem_ptr) {
- dev_err(adapter->dev, "Vmalloc %s failed\n",
- entry->mem_name);
+ mwifiex_dbg(adapter, ERROR, "Vmalloc %s failed\n",
+ entry->mem_name);
goto done;
}
dbg_ptr = entry->mem_ptr;
end_ptr = dbg_ptr + memory_size;
doneflag = entry->done_flag;
- dev_info(adapter->dev, "Start %s output, please wait...\n",
- entry->mem_name);
+ mwifiex_dbg(adapter, DUMP,
+ "Start %s output, please wait...\n",
+ entry->mem_name);
do {
stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
@@ -2207,39 +2275,43 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
for (reg = reg_start; reg <= reg_end; reg++) {
*dbg_ptr = sdio_readb(card->func, reg, &ret);
if (ret) {
- dev_err(adapter->dev,
- "SDIO read err\n");
+ mwifiex_dbg(adapter, ERROR,
+ "SDIO read err\n");
goto done;
}
if (dbg_ptr < end_ptr)
dbg_ptr++;
else
- dev_err(adapter->dev,
- "Allocated buf not enough\n");
+ mwifiex_dbg(adapter, ERROR,
+ "Allocated buf not enough\n");
}
if (stat != RDWR_STATUS_DONE)
continue;
- dev_info(adapter->dev, "%s done: size=0x%tx\n",
- entry->mem_name, dbg_ptr - entry->mem_ptr);
+ mwifiex_dbg(adapter, DUMP, "%s done: size=0x%tx\n",
+ entry->mem_name, dbg_ptr - entry->mem_ptr);
break;
} while (1);
}
- dev_info(adapter->dev, "== mwifiex firmware dump end ==\n");
-
- kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
+ mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
done:
sdio_release_host(card->func);
- adapter->curr_mem_idx = 0;
+}
+
+static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
+{
+ mwifiex_drv_info_dump(adapter);
+ mwifiex_sdio_fw_dump(adapter);
+ mwifiex_upload_device_dump(adapter);
}
static void mwifiex_sdio_work(struct work_struct *work)
{
- if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
+ if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
&iface_work_flags))
- mwifiex_sdio_fw_dump_work(save_adapter);
+ mwifiex_sdio_device_dump_work(save_adapter);
if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
&iface_work_flags))
mwifiex_sdio_card_reset_work(save_adapter);
@@ -2259,13 +2331,13 @@ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
}
/* This function dumps FW information */
-static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_device_dump(struct mwifiex_adapter *adapter)
{
save_adapter = adapter;
- if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
+ if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
return;
- set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
+ set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
schedule_work(&sdio_work);
}
@@ -2285,7 +2357,7 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
if (!p)
return 0;
- dev_info(adapter->dev, "SDIO register DUMP START\n");
+ mwifiex_dbg(adapter, MSG, "SDIO register dump start\n");
mwifiex_pm_wakeup_card(adapter);
@@ -2351,13 +2423,13 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
reg++;
}
- dev_info(adapter->dev, "%s\n", buf);
+ mwifiex_dbg(adapter, MSG, "%s\n", buf);
p += sprintf(p, "%s\n", buf);
}
sdio_release_host(cardp->func);
- dev_info(adapter->dev, "SDIO register DUMP END\n");
+ mwifiex_dbg(adapter, MSG, "SDIO register dump end\n");
return p - drv_buf;
}
@@ -2382,8 +2454,8 @@ static struct mwifiex_if_ops sdio_ops = {
.cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
.event_complete = mwifiex_sdio_event_complete,
.card_reset = mwifiex_sdio_card_reset,
- .fw_dump = mwifiex_sdio_fw_dump,
.reg_dump = mwifiex_sdio_reg_dump,
+ .device_dump = mwifiex_sdio_device_dump,
.deaggr_pkt = mwifiex_deaggr_sdio_pkt,
};
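
Context for reviewers: every hunk above swaps dev_dbg()/dev_err()/pr_*() for mwifiex_dbg(), which is defined outside this diff. A minimal sketch of the assumed helper follows; the per-adapter debug_mask field, the bit assignments, and the _mwifiex_dbg name are illustrative, not taken from this patch.

/* Minimal sketch, assuming a per-adapter debug_mask: each message carries
 * a level bit (MSG, ERROR, CMD, DATA, EVENT, INTR, WARN, INFO, DUMP, ...)
 * that is tested at run time, so verbosity is tunable per device instead
 * of being gated by dev_dbg()'s dynamic-debug machinery.
 */
#define MWIFIEX_DBG_MSG		BIT(0)	/* illustrative bit positions */
#define MWIFIEX_DBG_ERROR	BIT(5)

__printf(3, 4)
static void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask,
			 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (!adapter->dev || !(adapter->debug_mask & mask))
		return;		/* level not enabled for this adapter */

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	dev_info(adapter->dev, "%pV", &vaf);
	va_end(args);
}

#define mwifiex_dbg(adapter, mask_id, fmt, args...)	\
	_mwifiex_dbg(adapter, MWIFIEX_DBG_##mask_id, fmt, ##args)
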
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 49422f2a5380..037adcd1f484 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -77,8 +77,8 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl;
if (cmd_action != HostCmd_ACT_GEN_SET) {
- dev_err(priv->adapter->dev,
- "mac_control: only support set cmd\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "mac_control: only support set cmd\n");
return -1;
}
@@ -112,7 +112,8 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
{
struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib;
- dev_dbg(priv->adapter->dev, "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SNMP_MIB);
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_snmp_mib)
- 1 + S_DS_GEN);
@@ -129,11 +130,11 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
le16_add_cpu(&cmd->size, sizeof(u16));
}
- dev_dbg(priv->adapter->dev,
- "cmd: SNMP_CMD: Action=0x%x, OID=0x%x, OIDSize=0x%x,"
- " Value=0x%x\n",
- cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
- le16_to_cpu(*(__le16 *) snmp_mib->value));
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: SNMP_CMD: Action=0x%x, OID=0x%x,\t"
+ "OIDSize=0x%x, Value=0x%x\n",
+ cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
+ le16_to_cpu(*(__le16 *)snmp_mib->value));
return 0;
}
@@ -356,9 +357,9 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
(hscfg_param->conditions != cpu_to_le32(HS_CFG_CANCEL)) &&
((adapter->arp_filter_size > 0) &&
(adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
- dev_dbg(adapter->dev,
- "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n",
- adapter->arp_filter_size);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n",
+ adapter->arp_filter_size);
memcpy(((u8 *) hs_cfg) +
sizeof(struct host_cmd_ds_802_11_hs_cfg_enh),
adapter->arp_filter, adapter->arp_filter_size);
@@ -378,11 +379,11 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
hs_cfg->params.hs_config.conditions = hscfg_param->conditions;
hs_cfg->params.hs_config.gpio = hscfg_param->gpio;
hs_cfg->params.hs_config.gap = hscfg_param->gap;
- dev_dbg(adapter->dev,
- "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
- hs_cfg->params.hs_config.conditions,
- hs_cfg->params.hs_config.gpio,
- hs_cfg->params.hs_config.gap);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
+ hs_cfg->params.hs_config.conditions,
+ hs_cfg->params.hs_config.gpio,
+ hs_cfg->params.hs_config.gap);
}
return 0;
@@ -462,7 +463,7 @@ static int mwifiex_cmd_802_11_deauthenticate(struct mwifiex_private *priv,
/* Set AP MAC address */
memcpy(deauth->mac_addr, mac, ETH_ALEN);
- dev_dbg(priv->adapter->dev, "cmd: Deauth: %pM\n", deauth->mac_addr);
+ mwifiex_dbg(priv->adapter, CMD, "cmd: Deauth: %pM\n", deauth->mac_addr);
deauth->reason_code = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
@@ -540,9 +541,9 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
} else if (!priv->wep_key[i].key_length) {
continue;
} else {
- dev_err(priv->adapter->dev,
- "key%d Length = %d is incorrect\n",
- (i + 1), priv->wep_key[i].key_length);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "key%d Length = %d is incorrect\n",
+ (i + 1), priv->wep_key[i].key_length);
return -1;
}
}
@@ -562,7 +563,8 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
u16 size, len = KEY_PARAMS_FIXED_LEN;
if (enc_key->is_igtk_key) {
- dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "%s: Set CMAC AES Key\n", __func__);
if (enc_key->is_rx_seq_valid)
memcpy(km->key_param_set.key_params.cmac_aes.ipn,
enc_key->pn, enc_key->pn_len);
@@ -575,7 +577,8 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
enc_key->key_material, enc_key->key_len);
len += sizeof(struct mwifiex_cmac_aes_param);
} else {
- dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "%s: Set AES Key\n", __func__);
if (enc_key->is_rx_seq_valid)
memcpy(km->key_param_set.key_params.aes.pn,
enc_key->pn, enc_key->pn_len);
@@ -619,7 +622,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
km->action = cpu_to_le16(cmd_action);
if (cmd_action == HostCmd_ACT_GEN_GET) {
- dev_dbg(adapter->dev, "%s: Get key\n", __func__);
+ mwifiex_dbg(adapter, INFO, "%s: Get key\n", __func__);
km->key_param_set.key_idx =
enc_key->key_index & KEY_INDEX_MASK;
km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
@@ -646,7 +649,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie_type_key_param_set_v2));
if (enc_key->key_disable) {
- dev_dbg(adapter->dev, "%s: Remove key\n", __func__);
+ mwifiex_dbg(adapter, INFO, "%s: Remove key\n", __func__);
km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE);
km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
@@ -667,7 +670,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) {
- dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__);
+ mwifiex_dbg(adapter, INFO, "%s: Set WEP Key\n", __func__);
len += sizeof(struct mwifiex_wep_param);
km->key_param_set.len = cpu_to_le16(len);
km->key_param_set.key_type = KEY_TYPE_ID_WEP;
@@ -710,7 +713,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY;
if (enc_key->is_wapi_key) {
- dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__);
+ mwifiex_dbg(adapter, INFO, "%s: Set WAPI Key\n", __func__);
km->key_param_set.key_type = KEY_TYPE_ID_WAPI;
memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn,
PN_LEN);
@@ -750,7 +753,8 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km);
if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
- dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__);
+ mwifiex_dbg(adapter, INFO,
+ "%s: Set TKIP Key\n", __func__);
if (enc_key->is_rx_seq_valid)
memcpy(km->key_param_set.key_params.tkip.pn,
enc_key->pn, enc_key->pn_len);
@@ -814,7 +818,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
memset(&key_material->key_param_set, 0,
sizeof(struct mwifiex_ie_type_key_param_set));
if (enc_key->is_wapi_key) {
- dev_dbg(priv->adapter->dev, "info: Set WAPI Key\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: Set WAPI Key\n");
key_material->key_param_set.key_type_id =
cpu_to_le16(KEY_TYPE_ID_WAPI);
if (cmd_oid == KEY_INFO_ENABLED)
@@ -860,7 +864,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
}
if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
if (enc_key->is_igtk_key) {
- dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n");
+ mwifiex_dbg(priv->adapter, CMD, "cmd: CMAC_AES\n");
key_material->key_param_set.key_type_id =
cpu_to_le16(KEY_TYPE_ID_AES_CMAC);
if (cmd_oid == KEY_INFO_ENABLED)
@@ -873,7 +877,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
key_material->key_param_set.key_info |=
cpu_to_le16(KEY_IGTK);
} else {
- dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
+ mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_AES\n");
key_material->key_param_set.key_type_id =
cpu_to_le16(KEY_TYPE_ID_AES);
if (cmd_oid == KEY_INFO_ENABLED)
@@ -892,7 +896,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
cpu_to_le16(KEY_MCAST);
}
} else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
- dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
+ mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_TKIP\n");
key_material->key_param_set.key_type_id =
cpu_to_le16(KEY_TYPE_ID_TKIP);
key_material->key_param_set.key_info =
@@ -999,7 +1003,8 @@ static int mwifiex_cmd_802_11d_domain_info(struct mwifiex_private *priv,
&domain_info->domain;
u8 no_of_triplet = adapter->domain_reg.no_of_triplet;
- dev_dbg(adapter->dev, "info: 11D: no_of_triplet=0x%x\n", no_of_triplet);
+ mwifiex_dbg(adapter, INFO,
+ "info: 11D: no_of_triplet=0x%x\n", no_of_triplet);
cmd->command = cpu_to_le16(HostCmd_CMD_802_11D_DOMAIN_INFO);
domain_info->action = cpu_to_le16(cmd_action);
@@ -1071,6 +1076,26 @@ static int mwifiex_cmd_ibss_coalescing_status(struct host_cmd_ds_command *cmd,
return 0;
}
+/* This function prepares the command buffer to get/set a memory location
+ * value.
+ */
+static int
+mwifiex_cmd_mem_access(struct host_cmd_ds_command *cmd, u16 cmd_action,
+ void *pdata_buf)
+{
+ struct mwifiex_ds_mem_rw *mem_rw = (void *)pdata_buf;
+ struct host_cmd_ds_mem_access *mem_access = (void *)&cmd->params.mem;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_MEM_ACCESS);
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mem_access) +
+ S_DS_GEN);
+
+ mem_access->action = cpu_to_le16(cmd_action);
+ mem_access->addr = cpu_to_le32(mem_rw->addr);
+ mem_access->value = cpu_to_le32(mem_rw->value);
+
+ return 0;
+}
+
/*
* This function prepares command to set/get register value.
*
@@ -1215,8 +1240,9 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
(u32)(card->sleep_cookie_pbase);
host_spec->sleep_cookie_addr_hi =
(u32)(((u64)(card->sleep_cookie_pbase)) >> 32);
- dev_dbg(priv->adapter->dev, "sleep_cook_lo phy addr: 0x%x\n",
- host_spec->sleep_cookie_addr_lo);
+ mwifiex_dbg(priv->adapter, INFO,
+ "sleep_cook_lo phy addr: 0x%x\n",
+ host_spec->sleep_cookie_addr_lo);
}
return 0;
@@ -1243,7 +1269,8 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
S_DS_GEN);
subsc_evt->action = cpu_to_le16(subsc_evt_cfg->action);
- dev_dbg(priv->adapter->dev, "cmd: action: %d\n", subsc_evt_cfg->action);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: action: %d\n", subsc_evt_cfg->action);
/*For query requests, no configuration TLV structures are to be added.*/
if (subsc_evt_cfg->action == HostCmd_ACT_GEN_GET)
@@ -1252,14 +1279,15 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
subsc_evt->events = cpu_to_le16(subsc_evt_cfg->events);
event_bitmap = subsc_evt_cfg->events;
- dev_dbg(priv->adapter->dev, "cmd: event bitmap : %16x\n",
- event_bitmap);
+ mwifiex_dbg(priv->adapter, CMD, "cmd: event bitmap : %16x\n",
+ event_bitmap);
if (((subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR) ||
(subsc_evt_cfg->action == HostCmd_ACT_BITWISE_SET)) &&
(event_bitmap == 0)) {
- dev_dbg(priv->adapter->dev, "Error: No event specified "
- "for bitwise action type\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Error: No event specified\t"
+ "for bitwise action type\n");
return -EINVAL;
}
@@ -1284,10 +1312,11 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
rssi_tlv->abs_value = subsc_evt_cfg->bcn_l_rssi_cfg.abs_value;
rssi_tlv->evt_freq = subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq;
- dev_dbg(priv->adapter->dev, "Cfg Beacon Low Rssi event, "
- "RSSI:-%d dBm, Freq:%d\n",
- subsc_evt_cfg->bcn_l_rssi_cfg.abs_value,
- subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
+ mwifiex_dbg(priv->adapter, EVENT,
+ "Cfg Beacon Low Rssi event,\t"
+ "RSSI:-%d dBm, Freq:%d\n",
+ subsc_evt_cfg->bcn_l_rssi_cfg.abs_value,
+ subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
le16_add_cpu(&cmd->size,
@@ -1304,10 +1333,11 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
rssi_tlv->abs_value = subsc_evt_cfg->bcn_h_rssi_cfg.abs_value;
rssi_tlv->evt_freq = subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq;
- dev_dbg(priv->adapter->dev, "Cfg Beacon High Rssi event, "
- "RSSI:-%d dBm, Freq:%d\n",
- subsc_evt_cfg->bcn_h_rssi_cfg.abs_value,
- subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
+ mwifiex_dbg(priv->adapter, EVENT,
+ "Cfg Beacon High Rssi event,\t"
+ "RSSI:-%d dBm, Freq:%d\n",
+ subsc_evt_cfg->bcn_h_rssi_cfg.abs_value,
+ subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
le16_add_cpu(&cmd->size,
@@ -1463,12 +1493,14 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
data, len);
if (ret)
return ret;
- dev_dbg(adapter->dev,
- "download cfg_data from device tree: %s\n", prop->name);
+ mwifiex_dbg(adapter, INFO,
+ "download cfg_data from device tree: %s\n",
+ prop->name);
} else if (adapter->cal_data->data && adapter->cal_data->size > 0) {
len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
adapter->cal_data->size, data);
- dev_dbg(adapter->dev, "download cfg_data from config file\n");
+ mwifiex_dbg(adapter, INFO,
+ "download cfg_data from config file\n");
} else {
return -1;
}
@@ -1583,9 +1615,9 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG);
if (!params) {
- dev_err(priv->adapter->dev,
- "TDLS config params not available for %pM\n",
- oper->peer_mac);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS config params not available for %pM\n",
+ oper->peer_mac);
return -ENODATA;
}
@@ -1663,7 +1695,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
break;
default:
- dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS operation\n");
return -ENOTSUPP;
}
@@ -1870,8 +1902,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_11n_cfg(priv, cmd_ptr, cmd_action, data_buf);
break;
case HostCmd_CMD_WMM_GET_STATUS:
- dev_dbg(priv->adapter->dev,
- "cmd: WMM: WMM_GET_STATUS cmd sent\n");
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: WMM: WMM_GET_STATUS cmd sent\n");
cmd_ptr->command = cpu_to_le16(HostCmd_CMD_WMM_GET_STATUS);
cmd_ptr->size =
cpu_to_le16(sizeof(struct host_cmd_ds_wmm_get_status) +
@@ -1885,6 +1917,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_802_11_SCAN_EXT:
ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf);
break;
+ case HostCmd_CMD_MEM_ACCESS:
+ ret = mwifiex_cmd_mem_access(cmd_ptr, cmd_action, data_buf);
+ break;
case HostCmd_CMD_MAC_REG_ACCESS:
case HostCmd_CMD_BBP_REG_ACCESS:
case HostCmd_CMD_RF_REG_ACCESS:
@@ -1932,8 +1967,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
data_buf);
break;
default:
- dev_err(priv->adapter->dev,
- "PREP_CMD: unknown cmd- %#x\n", cmd_no);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "PREP_CMD: unknown cmd- %#x\n", cmd_no);
ret = -1;
break;
}
@@ -2024,8 +2059,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
&sdio_sp_rx_aggr_enable,
true);
if (ret) {
- dev_err(priv->adapter->dev,
- "error while enabling SP aggregation..disable it");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "error while enabling SP aggregation..disable it");
adapter->sdio_rx_aggr_enable = false;
}
}
@@ -2108,8 +2143,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
HostCmd_ACT_GEN_SET, DOT11D_I,
&state_11d, true);
if (ret)
- dev_err(priv->adapter->dev,
- "11D: failed to enable 11D\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "11D: failed to enable 11D\n");
}
/* Send cmd to FW to configure 11n specific configuration
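
Aside for readers: mwifiex_cmd_mem_access() above packs a payload whose layout this diff only implies. A hedged reconstruction follows; the reserved pad is an assumption inferred from the field accesses, not confirmed by the hunks here.

/* Layout inferred from mwifiex_cmd_mem_access(); the reserved pad is an
 * assumption. Fields are little-endian on the wire, hence the
 * cpu_to_le16()/cpu_to_le32() conversions in the hunk above.
 */
struct host_cmd_ds_mem_access {
	__le16 action;		/* HostCmd_ACT_GEN_GET or HostCmd_ACT_GEN_SET */
	__le16 reserved;
	__le32 addr;		/* firmware memory address to access */
	__le32 value;		/* value to write, or value read back on GET */
};
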
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 88dc6b672ef4..aa5b9a310340 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -49,8 +49,9 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
struct host_cmd_ds_802_11_ps_mode_enh *pm;
unsigned long flags;
- dev_err(adapter->dev, "CMD_RESP: cmd %#x error, result=%#x\n",
- resp->command, resp->result);
+ mwifiex_dbg(adapter, ERROR,
+ "CMD_RESP: cmd %#x error, result=%#x\n",
+ resp->command, resp->result);
if (adapter->curr_cmd->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
@@ -58,9 +59,9 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
switch (le16_to_cpu(resp->command)) {
case HostCmd_CMD_802_11_PS_MODE_ENH:
pm = &resp->params.psmode_enh;
- dev_err(adapter->dev,
- "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n",
- resp->result, le16_to_cpu(pm->action));
+ mwifiex_dbg(adapter, ERROR,
+ "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n",
+ resp->result, le16_to_cpu(pm->action));
/* We do not re-try enter-ps command in ad-hoc mode. */
if (le16_to_cpu(pm->action) == EN_AUTO_PS &&
(le16_to_cpu(pm->params.ps_bitmap) & BITMAP_STA_PS) &&
@@ -91,7 +92,8 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
break;
case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
- dev_err(priv->adapter->dev, "SDIO RX single-port aggregation Not support\n");
+ mwifiex_dbg(adapter, MSG,
+ "SDIO RX single-port aggregation Not support\n");
break;
default:
@@ -187,29 +189,34 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
u16 query_type = le16_to_cpu(smib->query_type);
u32 ul_temp;
- dev_dbg(priv->adapter->dev, "info: SNMP_RESP: oid value = %#x,"
- " query_type = %#x, buf size = %#x\n",
- oid, query_type, le16_to_cpu(smib->buf_size));
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: SNMP_RESP: oid value = %#x,\t"
+ "query_type = %#x, buf size = %#x\n",
+ oid, query_type, le16_to_cpu(smib->buf_size));
if (query_type == HostCmd_ACT_GEN_GET) {
ul_temp = le16_to_cpu(*((__le16 *) (smib->value)));
if (data_buf)
*data_buf = ul_temp;
switch (oid) {
case FRAG_THRESH_I:
- dev_dbg(priv->adapter->dev,
- "info: SNMP_RESP: FragThsd =%u\n", ul_temp);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: SNMP_RESP: FragThsd =%u\n",
+ ul_temp);
break;
case RTS_THRESH_I:
- dev_dbg(priv->adapter->dev,
- "info: SNMP_RESP: RTSThsd =%u\n", ul_temp);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: SNMP_RESP: RTSThsd =%u\n",
+ ul_temp);
break;
case SHORT_RETRY_LIM_I:
- dev_dbg(priv->adapter->dev,
- "info: SNMP_RESP: TxRetryCount=%u\n", ul_temp);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: SNMP_RESP: TxRetryCount=%u\n",
+ ul_temp);
break;
case DTIM_PERIOD_I:
- dev_dbg(priv->adapter->dev,
- "info: SNMP_RESP: DTIM period=%u\n", ul_temp);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: SNMP_RESP: DTIM period=%u\n",
+ ul_temp);
default:
break;
}
@@ -426,14 +433,15 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
priv->tx_power_level = (u16) pg->power_min;
break;
default:
- dev_err(adapter->dev, "CMD_RESP: unknown cmd action %d\n",
- action);
+ mwifiex_dbg(adapter, ERROR,
+ "CMD_RESP: unknown cmd action %d\n",
+ action);
return 0;
}
- dev_dbg(adapter->dev,
- "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n",
- priv->tx_power_level, priv->max_tx_power_level,
- priv->min_tx_power_level);
+ mwifiex_dbg(adapter, INFO,
+ "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n",
+ priv->tx_power_level, priv->max_tx_power_level,
+ priv->min_tx_power_level);
return 0;
}
@@ -454,10 +462,10 @@ static int mwifiex_ret_rf_tx_power(struct mwifiex_private *priv,
priv->min_tx_power_level = txp->min_power;
}
- dev_dbg(priv->adapter->dev,
- "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
- priv->tx_power_level, priv->max_tx_power_level,
- priv->min_tx_power_level);
+ mwifiex_dbg(priv->adapter, INFO,
+ "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
+ priv->tx_power_level, priv->max_tx_power_level,
+ priv->min_tx_power_level);
return 0;
}
@@ -473,18 +481,18 @@ static int mwifiex_ret_rf_antenna(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
- dev_dbg(adapter->dev,
- "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x"
- " Rx action = 0x%x, Rx Mode = 0x%04x\n",
- le16_to_cpu(ant_mimo->action_tx),
- le16_to_cpu(ant_mimo->tx_ant_mode),
- le16_to_cpu(ant_mimo->action_rx),
- le16_to_cpu(ant_mimo->rx_ant_mode));
+ mwifiex_dbg(adapter, INFO,
+ "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x\t"
+ "Rx action = 0x%x, Rx Mode = 0x%04x\n",
+ le16_to_cpu(ant_mimo->action_tx),
+ le16_to_cpu(ant_mimo->tx_ant_mode),
+ le16_to_cpu(ant_mimo->action_rx),
+ le16_to_cpu(ant_mimo->rx_ant_mode));
else
- dev_dbg(adapter->dev,
- "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
- le16_to_cpu(ant_siso->action),
- le16_to_cpu(ant_siso->ant_mode));
+ mwifiex_dbg(adapter, INFO,
+ "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
+ le16_to_cpu(ant_siso->action),
+ le16_to_cpu(ant_siso->ant_mode));
return 0;
}
@@ -502,8 +510,8 @@ static int mwifiex_ret_802_11_mac_address(struct mwifiex_private *priv,
memcpy(priv->curr_addr, cmd_mac_addr->mac_addr, ETH_ALEN);
- dev_dbg(priv->adapter->dev,
- "info: set mac address: %pM\n", priv->curr_addr);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: set mac address: %pM\n", priv->curr_addr);
return 0;
}
@@ -587,7 +595,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) {
if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) {
- dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: key: GTK is set\n");
priv->wpa_is_gtk_set = true;
priv->scan_block = false;
}
@@ -617,7 +626,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
key_v2 = &resp->params.key_material_v2;
if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
- dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
priv->wpa_is_gtk_set = true;
priv->scan_block = false;
}
@@ -663,14 +672,14 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
- IEEE80211_COUNTRY_STRING_LEN)
/ sizeof(struct ieee80211_country_ie_triplet));
- dev_dbg(priv->adapter->dev,
- "info: 11D Domain Info Resp: no_of_triplet=%d\n",
- no_of_triplet);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: 11D Domain Info Resp: no_of_triplet=%d\n",
+ no_of_triplet);
if (no_of_triplet > MWIFIEX_MAX_TRIPLET_802_11D) {
- dev_warn(priv->adapter->dev,
- "11D: invalid number of triplets %d returned\n",
- no_of_triplet);
+ mwifiex_dbg(priv->adapter, FATAL,
+ "11D: invalid number of triplets %d returned\n",
+ no_of_triplet);
return -1;
}
@@ -680,8 +689,8 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
case HostCmd_ACT_GEN_GET:
break;
default:
- dev_err(priv->adapter->dev,
- "11D: invalid action:%d\n", domain_info->action);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "11D: invalid action:%d\n", domain_info->action);
return -1;
}
@@ -741,6 +750,19 @@ mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv,
return 0;
}
+/* This function handles the command response of the mem_access command.
+ */
+static int
+mwifiex_ret_mem_access(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp, void *pioctl_buf)
+{
+ struct host_cmd_ds_mem_access *mem = (void *)&resp->params.mem;
+
+ priv->mem_rw.addr = le32_to_cpu(mem->addr);
+ priv->mem_rw.value = le32_to_cpu(mem->value);
+
+ return 0;
+}
/*
* This function handles the command response of register access.
*
@@ -830,12 +852,12 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
return 0;
- dev_dbg(priv->adapter->dev,
- "info: new BSSID %pM\n", ibss_coal_resp->bssid);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: new BSSID %pM\n", ibss_coal_resp->bssid);
/* If rsp has NULL BSSID, Just return..... No Action */
if (is_zero_ether_addr(ibss_coal_resp->bssid)) {
- dev_warn(priv->adapter->dev, "new BSSID is NULL\n");
+ mwifiex_dbg(priv->adapter, FATAL, "new BSSID is NULL\n");
return 0;
}
@@ -871,48 +893,48 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
case ACT_TDLS_DELETE:
if (reason) {
if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
- dev_dbg(priv->adapter->dev,
- "TDLS link delete for %pM failed: reason %d\n",
- cmd_tdls_oper->peer_mac, reason);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
else
- dev_err(priv->adapter->dev,
- "TDLS link delete for %pM failed: reason %d\n",
- cmd_tdls_oper->peer_mac, reason);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
} else {
- dev_dbg(priv->adapter->dev,
- "TDLS link delete for %pM successful\n",
- cmd_tdls_oper->peer_mac);
+ mwifiex_dbg(priv->adapter, MSG,
+ "TDLS link delete for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
}
break;
case ACT_TDLS_CREATE:
if (reason) {
- dev_err(priv->adapter->dev,
- "TDLS link creation for %pM failed: reason %d",
- cmd_tdls_oper->peer_mac, reason);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS link creation for %pM failed: reason %d",
+ cmd_tdls_oper->peer_mac, reason);
if (node && reason != TDLS_ERR_LINK_EXISTS)
node->tdls_status = TDLS_SETUP_FAILURE;
} else {
- dev_dbg(priv->adapter->dev,
- "TDLS link creation for %pM successful",
- cmd_tdls_oper->peer_mac);
+ mwifiex_dbg(priv->adapter, MSG,
+ "TDLS link creation for %pM successful",
+ cmd_tdls_oper->peer_mac);
}
break;
case ACT_TDLS_CONFIG:
if (reason) {
- dev_err(priv->adapter->dev,
- "TDLS link config for %pM failed, reason %d\n",
- cmd_tdls_oper->peer_mac, reason);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS link config for %pM failed, reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
if (node)
node->tdls_status = TDLS_SETUP_FAILURE;
} else {
- dev_dbg(priv->adapter->dev,
- "TDLS link config for %pM successful\n",
- cmd_tdls_oper->peer_mac);
+ mwifiex_dbg(priv->adapter, MSG,
+ "TDLS link config for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
}
break;
default:
- dev_err(priv->adapter->dev,
- "Unknown TDLS command action response %d", action);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Unknown TDLS command action response %d", action);
return -1;
}
@@ -929,8 +951,9 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
/* For every subscribe event command (Get/Set/Clear), FW reports the
* current set of subscribed events*/
- dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
- le16_to_cpu(cmd_sub_event->events));
+ mwifiex_dbg(priv->adapter, EVENT,
+ "Bitmap of currently subscribed events: %16x\n",
+ le16_to_cpu(cmd_sub_event->events));
return 0;
}
@@ -940,7 +963,7 @@ static int mwifiex_ret_cfg_data(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
if (resp->result != HostCmd_RESULT_OK) {
- dev_err(priv->adapter->dev, "Cal data cmd resp failed\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Cal data cmd resp failed\n");
return -1;
}
@@ -1008,8 +1031,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_802_11_BG_SCAN_QUERY:
ret = mwifiex_ret_802_11_scan(priv, resp);
- dev_dbg(adapter->dev,
- "info: CMD_RESP: BG_SCAN result is ready!\n");
+ mwifiex_dbg(adapter, CMD,
+ "info: CMD_RESP: BG_SCAN result is ready!\n");
break;
case HostCmd_CMD_TXPWR_CFG:
ret = mwifiex_ret_tx_power_cfg(priv, resp);
@@ -1088,8 +1111,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
/ MWIFIEX_SDIO_BLOCK_SIZE)
* MWIFIEX_SDIO_BLOCK_SIZE;
adapter->curr_tx_buf_size = adapter->tx_buf_size;
- dev_dbg(adapter->dev, "cmd: curr_tx_buf_size=%d\n",
- adapter->curr_tx_buf_size);
+ mwifiex_dbg(adapter, CMD, "cmd: curr_tx_buf_size=%d\n",
+ adapter->curr_tx_buf_size);
if (adapter->if_ops.update_mp_end_port)
adapter->if_ops.update_mp_end_port(adapter,
@@ -1103,6 +1126,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS:
ret = mwifiex_ret_ibss_coalescing_status(priv, resp);
break;
+ case HostCmd_CMD_MEM_ACCESS:
+ ret = mwifiex_ret_mem_access(priv, resp, data_buf);
+ break;
case HostCmd_CMD_MAC_REG_ACCESS:
case HostCmd_CMD_BBP_REG_ACCESS:
case HostCmd_CMD_RF_REG_ACCESS:
@@ -1146,8 +1172,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
break;
default:
- dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
- resp->command);
+ mwifiex_dbg(adapter, ERROR,
+ "CMD_RESP: unknown cmd response %#x\n",
+ resp->command);
break;
}
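
The bulk of this patch is mechanical: every dev_dbg()/dev_err()/dev_warn() call against adapter->dev becomes mwifiex_dbg(adapter, LEVEL, ...), where LEVEL (MSG, FATAL, ERROR, CMD, EVENT, DATA, INFO, WARN) selects a driver-private verbosity class instead of a global kernel log level. A minimal sketch of how such a level-masked wrapper can be built follows; the mask bit values, the debug_mask field, and the trimmed adapter struct are illustrative assumptions, not the driver's actual definitions.

```c
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/types.h>

/* Illustrative level bits -- assumed values, not mwifiex's real ones. */
#define MWIFIEX_DBG_MSG		BIT(0)
#define MWIFIEX_DBG_FATAL	BIT(1)
#define MWIFIEX_DBG_ERROR	BIT(2)
#define MWIFIEX_DBG_CMD		BIT(3)
#define MWIFIEX_DBG_EVENT	BIT(4)
#define MWIFIEX_DBG_DATA	BIT(5)
#define MWIFIEX_DBG_INFO	BIT(6)
#define MWIFIEX_DBG_WARN	BIT(7)

/* Only the fields the macro touches; the real adapter is far larger. */
struct mwifiex_adapter_sketch {
	struct device *dev;
	u32 debug_mask;
};

/* Token-paste the level name onto the bit prefix and print only when
 * that class is enabled in the per-adapter mask. */
#define mwifiex_dbg(adapter, mask, fmt, ...)				\
	do {								\
		if ((adapter)->debug_mask & MWIFIEX_DBG_##mask)		\
			dev_info((adapter)->dev, fmt, ##__VA_ARGS__);	\
	} while (0)
```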
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 0dc7a1d3993d..95203780010a 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -48,7 +48,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
if (!priv->media_connected)
return;
- dev_dbg(adapter->dev, "info: handles disconnect event\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: handles disconnect event\n");
priv->media_connected = false;
@@ -104,12 +105,14 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
* it could be used for re-assoc
*/
- dev_dbg(adapter->dev, "info: previous SSID=%s, SSID len=%u\n",
- priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: previous SSID=%s, SSID len=%u\n",
+ priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
- dev_dbg(adapter->dev, "info: current SSID=%s, SSID len=%u\n",
- priv->curr_bss_params.bss_descriptor.ssid.ssid,
- priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
+ mwifiex_dbg(adapter, INFO,
+ "info: current SSID=%s, SSID len=%u\n",
+ priv->curr_bss_params.bss_descriptor.ssid.ssid,
+ priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
memcpy(&priv->prev_ssid,
&priv->curr_bss_params.bss_descriptor.ssid,
@@ -127,13 +130,13 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
if (adapter->is_cmd_timedout && adapter->curr_cmd)
return;
priv->media_connected = false;
- dev_dbg(adapter->dev,
- "info: successfully disconnected from %pM: reason code %d\n",
- priv->cfg_bssid, reason_code);
+ mwifiex_dbg(adapter, MSG,
+ "info: successfully disconnected from %pM: reason code %d\n",
+ priv->cfg_bssid, reason_code);
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
- GFP_KERNEL);
+ false, GFP_KERNEL);
}
eth_zero_addr(priv->cfg_bssid);
@@ -154,13 +157,13 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
/* reserved 2 bytes are not mandatory in tdls event */
if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
sizeof(u16) - sizeof(adapter->event_cause))) {
- dev_err(adapter->dev, "Invalid event length!\n");
+ mwifiex_dbg(adapter, ERROR, "Invalid event length!\n");
return -1;
}
sta_ptr = mwifiex_get_sta_entry(priv, tdls_evt->peer_mac);
if (!sta_ptr) {
- dev_err(adapter->dev, "cannot get sta entry!\n");
+ mwifiex_dbg(adapter, ERROR, "cannot get sta entry!\n");
return -1;
}
@@ -239,21 +242,21 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
switch (eventcause) {
case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
- dev_err(adapter->dev,
- "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n");
+ mwifiex_dbg(adapter, ERROR,
+ "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n");
break;
case EVENT_LINK_SENSED:
- dev_dbg(adapter->dev, "event: LINK_SENSED\n");
+ mwifiex_dbg(adapter, EVENT, "event: LINK_SENSED\n");
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
break;
case EVENT_DEAUTHENTICATED:
- dev_dbg(adapter->dev, "event: Deauthenticated\n");
+ mwifiex_dbg(adapter, EVENT, "event: Deauthenticated\n");
if (priv->wps.session_enable) {
- dev_dbg(adapter->dev,
- "info: receive deauth event in wps session\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: receive deauth event in wps session\n");
break;
}
adapter->dbg.num_event_deauth++;
@@ -265,10 +268,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_DISASSOCIATED:
- dev_dbg(adapter->dev, "event: Disassociated\n");
+ mwifiex_dbg(adapter, EVENT, "event: Disassociated\n");
if (priv->wps.session_enable) {
- dev_dbg(adapter->dev,
- "info: receive disassoc event in wps session\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: receive disassoc event in wps session\n");
break;
}
adapter->dbg.num_event_disassoc++;
@@ -280,7 +283,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_LINK_LOST:
- dev_dbg(adapter->dev, "event: Link lost\n");
+ mwifiex_dbg(adapter, EVENT, "event: Link lost\n");
adapter->dbg.num_event_link_lost++;
if (priv->media_connected) {
reason_code =
@@ -290,7 +293,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_PS_SLEEP:
- dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
+ mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n");
adapter->ps_state = PS_STATE_PRE_SLEEP;
@@ -298,12 +301,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_PS_AWAKE:
- dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
+ mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
if (!adapter->pps_uapsd_mode &&
priv->media_connected && adapter->sleep_period.period) {
adapter->pps_uapsd_mode = true;
- dev_dbg(adapter->dev,
- "event: PPS/UAPSD mode activated\n");
+ mwifiex_dbg(adapter, EVENT,
+ "event: PPS/UAPSD mode activated\n");
}
adapter->tx_lock_flag = false;
if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
@@ -333,26 +336,26 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_DEEP_SLEEP_AWAKE:
adapter->if_ops.wakeup_complete(adapter);
- dev_dbg(adapter->dev, "event: DS_AWAKE\n");
+ mwifiex_dbg(adapter, EVENT, "event: DS_AWAKE\n");
if (adapter->is_deep_sleep)
adapter->is_deep_sleep = false;
break;
case EVENT_HS_ACT_REQ:
- dev_dbg(adapter->dev, "event: HS_ACT_REQ\n");
+ mwifiex_dbg(adapter, EVENT, "event: HS_ACT_REQ\n");
ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_HS_CFG_ENH,
0, 0, NULL, false);
break;
case EVENT_MIC_ERR_UNICAST:
- dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
+ mwifiex_dbg(adapter, EVENT, "event: UNICAST MIC ERROR\n");
cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
NL80211_KEYTYPE_PAIRWISE,
-1, NULL, GFP_KERNEL);
break;
case EVENT_MIC_ERR_MULTICAST:
- dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
+ mwifiex_dbg(adapter, EVENT, "event: MULTICAST MIC ERROR\n");
cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
NL80211_KEYTYPE_GROUP,
-1, NULL, GFP_KERNEL);
@@ -362,7 +365,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_ADHOC_BCN_LOST:
- dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n");
+ mwifiex_dbg(adapter, EVENT, "event: ADHOC_BCN_LOST\n");
priv->adhoc_is_link_sensed = false;
mwifiex_clean_txrx(priv);
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
@@ -371,17 +374,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_BG_SCAN_REPORT:
- dev_dbg(adapter->dev, "event: BGS_REPORT\n");
+ mwifiex_dbg(adapter, EVENT, "event: BGS_REPORT\n");
ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_QUERY,
HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
case EVENT_PORT_RELEASE:
- dev_dbg(adapter->dev, "event: PORT RELEASE\n");
+ mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
break;
case EVENT_EXT_SCAN_REPORT:
- dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
if (adapter->ext_scan)
ret = mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);
@@ -389,7 +392,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_WMM_STATUS_CHANGE:
- dev_dbg(adapter->dev, "event: WMM status changed\n");
+ mwifiex_dbg(adapter, EVENT, "event: WMM status changed\n");
ret = mwifiex_send_cmd(priv, HostCmd_CMD_WMM_GET_STATUS,
0, 0, NULL, false);
break;
@@ -401,13 +404,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
HostCmd_ACT_GEN_GET, 0, NULL, false);
priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
- dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
+ mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_LOW\n");
break;
case EVENT_SNR_LOW:
- dev_dbg(adapter->dev, "event: Beacon SNR_LOW\n");
+ mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_LOW\n");
break;
case EVENT_MAX_FAIL:
- dev_dbg(adapter->dev, "event: MAX_FAIL\n");
+ mwifiex_dbg(adapter, EVENT, "event: MAX_FAIL\n");
break;
case EVENT_RSSI_HIGH:
cfg80211_cqm_rssi_notify(priv->netdev,
@@ -416,47 +419,47 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
HostCmd_ACT_GEN_GET, 0, NULL, false);
priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
- dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
+ mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_HIGH\n");
break;
case EVENT_SNR_HIGH:
- dev_dbg(adapter->dev, "event: Beacon SNR_HIGH\n");
+ mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_HIGH\n");
break;
case EVENT_DATA_RSSI_LOW:
- dev_dbg(adapter->dev, "event: Data RSSI_LOW\n");
+ mwifiex_dbg(adapter, EVENT, "event: Data RSSI_LOW\n");
break;
case EVENT_DATA_SNR_LOW:
- dev_dbg(adapter->dev, "event: Data SNR_LOW\n");
+ mwifiex_dbg(adapter, EVENT, "event: Data SNR_LOW\n");
break;
case EVENT_DATA_RSSI_HIGH:
- dev_dbg(adapter->dev, "event: Data RSSI_HIGH\n");
+ mwifiex_dbg(adapter, EVENT, "event: Data RSSI_HIGH\n");
break;
case EVENT_DATA_SNR_HIGH:
- dev_dbg(adapter->dev, "event: Data SNR_HIGH\n");
+ mwifiex_dbg(adapter, EVENT, "event: Data SNR_HIGH\n");
break;
case EVENT_LINK_QUALITY:
- dev_dbg(adapter->dev, "event: Link Quality\n");
+ mwifiex_dbg(adapter, EVENT, "event: Link Quality\n");
break;
case EVENT_PRE_BEACON_LOST:
- dev_dbg(adapter->dev, "event: Pre-Beacon Lost\n");
+ mwifiex_dbg(adapter, EVENT, "event: Pre-Beacon Lost\n");
break;
case EVENT_IBSS_COALESCED:
- dev_dbg(adapter->dev, "event: IBSS_COALESCED\n");
+ mwifiex_dbg(adapter, EVENT, "event: IBSS_COALESCED\n");
ret = mwifiex_send_cmd(priv,
HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
case EVENT_ADDBA:
- dev_dbg(adapter->dev, "event: ADDBA Request\n");
+ mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
HostCmd_ACT_GEN_SET, 0,
adapter->event_body, false);
break;
case EVENT_DELBA:
- dev_dbg(adapter->dev, "event: DELBA Request\n");
+ mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n");
mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
break;
case EVENT_BA_STREAM_TIEMOUT:
- dev_dbg(adapter->dev, "event: BA Stream timeout\n");
+ mwifiex_dbg(adapter, EVENT, "event: BA Stream timeout\n");
mwifiex_11n_ba_stream_timeout(priv,
(struct host_cmd_ds_11n_batimeout
*)
@@ -464,28 +467,31 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_AMSDU_AGGR_CTRL:
ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
- dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+ mwifiex_dbg(adapter, EVENT,
+ "event: AMSDU_AGGR_CTRL %d\n", ctrl);
adapter->tx_buf_size =
min_t(u16, adapter->curr_tx_buf_size, ctrl);
- dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
- adapter->tx_buf_size);
+ mwifiex_dbg(adapter, EVENT, "event: tx_buf_size %d\n",
+ adapter->tx_buf_size);
break;
case EVENT_WEP_ICV_ERR:
- dev_dbg(adapter->dev, "event: WEP ICV error\n");
+ mwifiex_dbg(adapter, EVENT, "event: WEP ICV error\n");
break;
case EVENT_BW_CHANGE:
- dev_dbg(adapter->dev, "event: BW Change\n");
+ mwifiex_dbg(adapter, EVENT, "event: BW Change\n");
break;
case EVENT_HOSTWAKE_STAIE:
- dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
+ mwifiex_dbg(adapter, EVENT,
+ "event: HOSTWAKE_STAIE %d\n", eventcause);
break;
case EVENT_REMAIN_ON_CHAN_EXPIRED:
- dev_dbg(adapter->dev, "event: Remain on channel expired\n");
+ mwifiex_dbg(adapter, EVENT,
+ "event: Remain on channel expired\n");
cfg80211_remain_on_channel_expired(&priv->wdev,
priv->roc_cfg.cookie,
&priv->roc_cfg.chan,
@@ -496,7 +502,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_CHANNEL_SWITCH_ANN:
- dev_dbg(adapter->dev, "event: Channel Switch Announcement\n");
+ mwifiex_dbg(adapter, EVENT, "event: Channel Switch Announcement\n");
priv->csa_expire_time =
jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME);
priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel;
@@ -511,23 +517,23 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_TX_STATUS_REPORT:
- dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
mwifiex_parse_tx_status_event(priv, adapter->event_body);
break;
case EVENT_CHANNEL_REPORT_RDY:
- dev_dbg(adapter->dev, "event: Channel Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: Channel Report\n");
ret = mwifiex_11h_handle_chanrpt_ready(priv,
adapter->event_skb);
break;
case EVENT_RADAR_DETECTED:
- dev_dbg(adapter->dev, "event: Radar detected\n");
+ mwifiex_dbg(adapter, EVENT, "event: Radar detected\n");
ret = mwifiex_11h_handle_radar_detected(priv,
adapter->event_skb);
break;
default:
- dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
- eventcause);
+ mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n",
+ eventcause);
break;
}
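
One hunk in sta_event.c is not a log conversion: cfg80211_disconnected() gained a locally_generated flag in the cfg80211 API, and the driver passes false because the disconnect reported here originated from the firmware/AP side, not from the local host. A sketch of the updated call, wrapped in an assumed helper purely to spell out the argument list:

```c
#include <net/cfg80211.h>

/* Assumed wrapper, shown only to make the new signature explicit. */
static void report_fw_disconnect(struct net_device *netdev, u16 reason)
{
	cfg80211_disconnected(netdev, reason,
			      NULL, 0,		/* no disconnect IEs */
			      false,		/* not locally generated */
			      GFP_KERNEL);
}
```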
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 0599e41e253c..d8b7d9c20450 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -64,7 +64,10 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
*(cmd_queued->condition),
(12 * HZ));
if (status <= 0) {
- dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
+ if (status == 0)
+ status = -ETIMEDOUT;
+ mwifiex_dbg(adapter, ERROR,
+ "cmd_wait_q terminated: %d\n", status);
mwifiex_cancel_all_pending_cmd(adapter);
return status;
}
@@ -91,7 +94,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
old_pkt_filter = priv->curr_pkt_filter;
if (mcast_list->mode == MWIFIEX_PROMISC_MODE) {
- dev_dbg(priv->adapter->dev, "info: Enable Promiscuous mode\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Enable Promiscuous mode\n");
priv->curr_pkt_filter |= HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
priv->curr_pkt_filter &=
~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
@@ -99,16 +103,16 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
/* Multicast */
priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
- dev_dbg(priv->adapter->dev,
- "info: Enabling All Multicast!\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Enabling All Multicast!\n");
priv->curr_pkt_filter |=
HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
} else {
priv->curr_pkt_filter &=
~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
- dev_dbg(priv->adapter->dev,
- "info: Set multicast list=%d\n",
- mcast_list->num_multicast_addr);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Set multicast list=%d\n",
+ mcast_list->num_multicast_addr);
/* Send multicast addresses to firmware */
ret = mwifiex_send_cmd(priv,
HostCmd_CMD_MAC_MULTICAST_ADR,
@@ -116,9 +120,9 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
mcast_list, false);
}
}
- dev_dbg(priv->adapter->dev,
- "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
- old_pkt_filter, priv->curr_pkt_filter);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
+ old_pkt_filter, priv->curr_pkt_filter);
if (old_pkt_filter != priv->curr_pkt_filter) {
ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
HostCmd_ACT_GEN_SET,
@@ -151,7 +155,8 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
rcu_read_unlock();
if (!beacon_ie) {
- dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ " failed to alloc beacon_ie\n");
return -ENOMEM;
}
@@ -165,7 +170,8 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
bss_desc->bss_band = bss_priv->band;
bss_desc->fw_tsf = bss_priv->fw_tsf;
if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
- dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: InterpretIE: AP WEP enabled\n");
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
} else {
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
@@ -219,8 +225,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) {
rcu_read_unlock();
- wiphy_dbg(priv->wdev.wiphy,
- "11D: skip setting domain info in FW\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "11D: skip setting domain info in FW\n");
return 0;
}
memcpy(priv->adapter->country_code, &country_ie[2], 2);
@@ -241,8 +247,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
HostCmd_ACT_GEN_SET, 0, NULL, false)) {
- wiphy_err(priv->adapter->wiphy,
- "11D: setting domain info in FW\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "11D: setting domain info in FW fail\n");
return -1;
}
@@ -304,14 +310,15 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
if (mwifiex_11h_get_csa_closed_channel(priv) ==
(u8)bss_desc->channel) {
- dev_err(adapter->dev,
- "Attempt to reconnect on csa closed chan(%d)\n",
- bss_desc->channel);
+ mwifiex_dbg(adapter, ERROR,
+ "Attempt to reconnect on csa closed chan(%d)\n",
+ bss_desc->channel);
goto done;
}
- dev_dbg(adapter->dev, "info: SSID found in scan list ... "
- "associating...\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: SSID found in scan list ...\t"
+ "associating...\n");
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
@@ -353,15 +360,17 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
netif_carrier_off(priv->netdev);
if (!ret) {
- dev_dbg(adapter->dev, "info: network found in scan"
- " list. Joining...\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: network found in scan\t"
+ " list. Joining...\n");
ret = mwifiex_adhoc_join(priv, bss_desc);
if (bss)
cfg80211_put_bss(priv->adapter->wiphy, bss);
} else {
- dev_dbg(adapter->dev, "info: Network not found in "
- "the list, creating adhoc with ssid = %s\n",
- req_ssid->ssid);
+ mwifiex_dbg(adapter, INFO,
+ "info: Network not found in\t"
+ "the list, creating adhoc with ssid = %s\n",
+ req_ssid->ssid);
ret = mwifiex_adhoc_start(priv, req_ssid);
}
}
@@ -396,8 +405,9 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
switch (action) {
case HostCmd_ACT_GEN_SET:
if (adapter->pps_uapsd_mode) {
- dev_dbg(adapter->dev, "info: Host Sleep IOCTL"
- " is blocked in UAPSD/PPS mode\n");
+ mwifiex_dbg(adapter, INFO,
+ "info: Host Sleep IOCTL\t"
+ "is blocked in UAPSD/PPS mode\n");
status = -1;
break;
}
@@ -494,7 +504,8 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
}
if (adapter->hs_activated) {
- dev_dbg(adapter->dev, "cmd: HS Already activated\n");
+ mwifiex_dbg(adapter, CMD,
+ "cmd: HS Already activated\n");
return true;
}
@@ -510,14 +521,16 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
MWIFIEX_BSS_ROLE_STA),
HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
&hscfg)) {
- dev_err(adapter->dev, "IOCTL request HS enable failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "IOCTL request HS enable failed\n");
return false;
}
if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
adapter->hs_activate_wait_q_woken,
(10 * HZ)) <= 0) {
- dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
+ mwifiex_dbg(adapter, ERROR,
+ "hs_activate_wait_q terminated\n");
return false;
}
@@ -637,10 +650,11 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
dbm = (u16) power_cfg->power_level;
if ((dbm < priv->min_tx_power_level) ||
(dbm > priv->max_tx_power_level)) {
- dev_err(priv->adapter->dev, "txpower value %d dBm"
- " is out of range (%d dBm-%d dBm)\n",
- dbm, priv->min_tx_power_level,
- priv->max_tx_power_level);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "txpower value %d dBm\t"
+ "is out of range (%d dBm-%d dBm)\n",
+ dbm, priv->min_tx_power_level,
+ priv->max_tx_power_level);
return -1;
}
}
@@ -739,14 +753,15 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
{
if (ie_len) {
if (ie_len > sizeof(priv->wpa_ie)) {
- dev_err(priv->adapter->dev,
- "failed to copy WPA IE, too big\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "failed to copy WPA IE, too big\n");
return -1;
}
memcpy(priv->wpa_ie, ie_data_ptr, ie_len);
priv->wpa_ie_len = (u8) ie_len;
- dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n",
- priv->wpa_ie_len, priv->wpa_ie[0]);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: Set Wpa_ie_len=%d IE=%#x\n",
+ priv->wpa_ie_len, priv->wpa_ie[0]);
if (priv->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) {
priv->sec_info.wpa_enabled = true;
@@ -759,8 +774,9 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
} else {
memset(priv->wpa_ie, 0, sizeof(priv->wpa_ie));
priv->wpa_ie_len = 0;
- dev_dbg(priv->adapter->dev, "info: reset wpa_ie_len=%d IE=%#x\n",
- priv->wpa_ie_len, priv->wpa_ie[0]);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: reset wpa_ie_len=%d IE=%#x\n",
+ priv->wpa_ie_len, priv->wpa_ie[0]);
priv->sec_info.wpa_enabled = false;
priv->sec_info.wpa2_enabled = false;
}
@@ -780,23 +796,24 @@ static int mwifiex_set_wapi_ie(struct mwifiex_private *priv,
{
if (ie_len) {
if (ie_len > sizeof(priv->wapi_ie)) {
- dev_dbg(priv->adapter->dev,
- "info: failed to copy WAPI IE, too big\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "info: failed to copy WAPI IE, too big\n");
return -1;
}
memcpy(priv->wapi_ie, ie_data_ptr, ie_len);
priv->wapi_ie_len = ie_len;
- dev_dbg(priv->adapter->dev, "cmd: Set wapi_ie_len=%d IE=%#x\n",
- priv->wapi_ie_len, priv->wapi_ie[0]);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: Set wapi_ie_len=%d IE=%#x\n",
+ priv->wapi_ie_len, priv->wapi_ie[0]);
if (priv->wapi_ie[0] == WLAN_EID_BSS_AC_ACCESS_DELAY)
priv->sec_info.wapi_enabled = true;
} else {
memset(priv->wapi_ie, 0, sizeof(priv->wapi_ie));
priv->wapi_ie_len = ie_len;
- dev_dbg(priv->adapter->dev,
- "info: Reset wapi_ie_len=%d IE=%#x\n",
- priv->wapi_ie_len, priv->wapi_ie[0]);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Reset wapi_ie_len=%d IE=%#x\n",
+ priv->wapi_ie_len, priv->wapi_ie[0]);
priv->sec_info.wapi_enabled = false;
}
return 0;
@@ -814,8 +831,8 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
{
if (ie_len) {
if (ie_len > MWIFIEX_MAX_VSIE_LEN) {
- dev_dbg(priv->adapter->dev,
- "info: failed to copy WPS IE, too big\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "info: failed to copy WPS IE, too big\n");
return -1;
}
@@ -825,13 +842,14 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
memcpy(priv->wps_ie, ie_data_ptr, ie_len);
priv->wps_ie_len = ie_len;
- dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n",
- priv->wps_ie_len, priv->wps_ie[0]);
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: Set wps_ie_len=%d IE=%#x\n",
+ priv->wps_ie_len, priv->wps_ie[0]);
} else {
kfree(priv->wps_ie);
priv->wps_ie_len = ie_len;
- dev_dbg(priv->adapter->dev,
- "info: Reset wps_ie_len=%d\n", priv->wps_ie_len);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: Reset wps_ie_len=%d\n", priv->wps_ie_len);
}
return 0;
}
@@ -875,8 +893,8 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
/* Copy the required key as the current key */
wep_key = &priv->wep_key[index];
if (!wep_key->key_length) {
- dev_err(adapter->dev,
- "key not set, so cannot enable it\n");
+ mwifiex_dbg(adapter, ERROR,
+ "key not set, so cannot enable it\n");
return -1;
}
@@ -953,7 +971,8 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
/* Current driver only supports key length of up to 32 bytes */
if (encrypt_key->key_len > WLAN_MAX_KEY_LEN) {
- dev_err(priv->adapter->dev, "key length too long\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "key length too long\n");
return -1;
}
@@ -1040,7 +1059,7 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
snprintf(version, max_len, driver_version, fw_ver);
- dev_dbg(adapter->dev, "info: MWIFIEX VERSION: %s\n", version);
+ mwifiex_dbg(adapter, MSG, "info: MWIFIEX VERSION: %s\n", version);
return 0;
}
@@ -1128,7 +1147,8 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
}
if (mwifiex_send_cmd(priv, HostCmd_CMD_REMAIN_ON_CHAN,
action, 0, &roc_cfg, true)) {
- dev_err(priv->adapter->dev, "failed to remain on channel\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "failed to remain on channel\n");
return -1;
}
@@ -1313,8 +1333,8 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) &&
(!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui)))) {
priv->wps.session_enable = true;
- dev_dbg(priv->adapter->dev,
- "info: WPS Session Enabled.\n");
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WPS Session Enabled.\n");
ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len);
}
@@ -1361,7 +1381,8 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
memset(adapter->arp_filter, 0, sizeof(adapter->arp_filter));
if (gen_ie->len > ARP_FILTER_MAX_BUF_SIZE) {
adapter->arp_filter_size = 0;
- dev_err(adapter->dev, "invalid ARP filter size\n");
+ mwifiex_dbg(adapter, ERROR,
+ "invalid ARP filter size\n");
return -1;
} else {
memcpy(adapter->arp_filter, gen_ie->ie_data,
@@ -1370,7 +1391,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
}
break;
default:
- dev_err(adapter->dev, "invalid IE type\n");
+ mwifiex_dbg(adapter, ERROR, "invalid IE type\n");
return -1;
}
return 0;
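
Besides the log rewrites, the first hunk of sta_ioctl.c fixes the command wait's return value: wait_event_interruptible_timeout() returns 0 on timeout, which callers could not distinguish from a real error code, so the patch remaps 0 to -ETIMEDOUT before cancelling pending commands. A standalone sketch of that normalisation pattern (the helper name is this sketch's invention):

```c
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

/* Sketch: fold the three outcomes of a 12-second interruptible wait
 * into conventional error codes. */
static int wait_cmd_completion(wait_queue_head_t *wq, bool *done)
{
	long status = wait_event_interruptible_timeout(*wq, *done, 12 * HZ);

	if (status == 0)
		return -ETIMEDOUT;	/* timed out: 0 is not an error code */
	if (status < 0)
		return status;		/* -ERESTARTSYS: hit by a signal */
	return 0;			/* condition met within the timeout */
}
```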
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index b8729c9394e9..d4d4cb1ce95b 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -141,7 +141,7 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
if (priv->hs2_enabled &&
mwifiex_discard_gratuitous_arp(priv, skb)) {
- dev_dbg(priv->adapter->dev, "Bypassed Gratuitous ARP\n");
+ mwifiex_dbg(priv->adapter, INFO, "Bypassed Gratuitous ARP\n");
dev_kfree_skb_any(skb);
return 0;
}
@@ -166,7 +166,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
ret = mwifiex_recv_packet(priv, skb);
if (ret == -1)
- dev_err(priv->adapter->dev, "recv packet failed\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "recv packet failed\n");
return ret;
}
@@ -203,9 +204,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
- dev_err(adapter->dev,
- "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
- skb->len, rx_pkt_offset, rx_pkt_length);
+ mwifiex_dbg(adapter, ERROR,
+ "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
+ skb->len, rx_pkt_offset, rx_pkt_length);
priv->stats.rx_dropped++;
dev_kfree_skb_any(skb);
return ret;
@@ -214,7 +215,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
if (rx_pkt_type == PKT_TYPE_MGMT) {
ret = mwifiex_process_mgmt_packet(priv, skb);
if (ret)
- dev_err(adapter->dev, "Rx of mgmt packet failed");
+ mwifiex_dbg(adapter, ERROR, "Rx of mgmt packet failed");
dev_kfree_skb_any(skb);
return ret;
}
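
The RX-path warnings above all guard the same invariant: the offset and length the firmware reports for a packet must lie inside the skb, otherwise the frame is dropped and counted. A minimal sketch of that check, with widening to u32 to avoid u16 overflow (the widening is this sketch's addition, not the driver's code):

```c
#include <linux/skbuff.h>

/* Sketch: firmware-reported packet bounds must fit in the skb. */
static bool rx_pkt_bounds_ok(const struct sk_buff *skb,
			     u16 rx_pkt_offset, u16 rx_pkt_length)
{
	return (u32)rx_pkt_offset + rx_pkt_length <= skb->len;
}
```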
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 5ce2d9a4f919..355ac5904fac 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -53,7 +53,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
INTF_HEADER_LEN;
if (!skb->len) {
- dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
+ mwifiex_dbg(adapter, ERROR,
+ "Tx: bad packet length: %d\n", skb->len);
tx_info->status_code = -1;
return skb->data;
}
@@ -184,21 +185,24 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
switch (ret) {
case -EBUSY:
dev_kfree_skb_any(skb);
- dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
- __func__, ret);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: host_to_card failed: ret=%d\n",
+ __func__, ret);
adapter->dbg.num_tx_host_to_card_failure++;
break;
case -1:
adapter->data_sent = false;
dev_kfree_skb_any(skb);
- dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
- __func__, ret);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: host_to_card failed: ret=%d\n",
+ __func__, ret);
adapter->dbg.num_tx_host_to_card_failure++;
break;
case 0:
dev_kfree_skb_any(skb);
- dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n",
- __func__);
+ mwifiex_dbg(adapter, DATA,
+ "data: %s: host_to_card succeeded\n",
+ __func__);
adapter->tx_lock_flag = true;
break;
case -EINPROGRESS:
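
The switch converted above in mwifiex_send_null_packet() follows a fixed completion pattern: the skb is consumed on every synchronous outcome, failures feed a diagnostic counter, and only success latches the TX lock. A compact sketch under those assumptions; the counter and flag parameters are stand-ins for the adapter fields, not the driver's actual interface:

```c
#include <linux/skbuff.h>
#include <linux/types.h>

/* Sketch: handle the synchronous outcomes of host_to_card(). */
static void null_pkt_done(struct sk_buff *skb, int ret,
			  u32 *fail_count, bool *tx_lock_flag)
{
	dev_kfree_skb_any(skb);		/* consumed on every sync path */
	if (ret < 0)
		(*fail_count)++;	/* -EBUSY or -1 from the bus layer */
	else
		*tx_lock_flag = true;	/* 0: frame handed to the card */
}
```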
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 087d84762cd3..2faa1bc42abe 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -37,7 +37,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
u32 tid;
u8 tid_down;
- dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
@@ -94,7 +94,7 @@ static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv,
unsigned long flags;
int i;
- dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
for (i = 0; i < MAX_NUM_TID; i++) {
@@ -132,8 +132,8 @@ mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv,
supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES);
if (skb_tailroom(skb) < rates_size + 4) {
- dev_err(priv->adapter->dev,
- "Insuffient space while adding rates\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Insuffient space while adding rates\n");
return -ENOMEM;
}
@@ -199,8 +199,8 @@ mwifiex_tdls_add_ht_oper(struct mwifiex_private *priv, const u8 *mac,
sta_ptr = mwifiex_get_sta_entry(priv, mac);
if (unlikely(!sta_ptr)) {
- dev_warn(priv->adapter->dev,
- "TDLS peer station not found in list\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "TDLS peer station not found in list\n");
return -1;
}
@@ -247,15 +247,16 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
sta_ptr = mwifiex_get_sta_entry(priv, mac);
if (unlikely(!sta_ptr)) {
- dev_warn(adapter->dev, "TDLS peer station not found in list\n");
+ mwifiex_dbg(adapter, ERROR,
+ "TDLS peer station not found in list\n");
return -1;
}
if (!mwifiex_is_bss_in_11ac_mode(priv)) {
if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
- dev_dbg(adapter->dev,
- "TDLS peer doesn't support wider bandwitdh\n");
+ mwifiex_dbg(adapter, WARN,
+ "TDLS peer doesn't support wider bandwidth\n");
return 0;
}
} else {
@@ -554,7 +555,7 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
tf->u.discover_req.dialog_token = dialog_token;
break;
default:
- dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
return -EINVAL;
}
@@ -608,8 +609,8 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
skb = dev_alloc_skb(skb_len);
if (!skb) {
- dev_err(priv->adapter->dev,
- "allocate skb failed for management frame\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "allocate skb failed for management frame\n");
return -ENOMEM;
}
skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
@@ -742,7 +743,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
mwifiex_tdls_add_qos_capab(skb);
break;
default:
- dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS action frame type\n");
return -EINVAL;
}
@@ -781,8 +782,8 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
skb = dev_alloc_skb(skb_len);
if (!skb) {
- dev_err(priv->adapter->dev,
- "allocate skb failed for management frame\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "allocate skb failed for management frame\n");
return -ENOMEM;
}
@@ -848,8 +849,8 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
peer = buf + ETH_ALEN;
action = *(buf + sizeof(struct ethhdr) + 2);
- dev_dbg(priv->adapter->dev,
- "rx:tdls action: peer=%pM, action=%d\n", peer, action);
+ mwifiex_dbg(priv->adapter, DATA,
+ "rx:tdls action: peer=%pM, action=%d\n", peer, action);
switch (action) {
case WLAN_TDLS_SETUP_REQUEST:
@@ -880,7 +881,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
break;
default:
- dev_dbg(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
return;
}
@@ -967,8 +968,8 @@ mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer)
sta_ptr = mwifiex_get_sta_entry(priv, peer);
if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) {
- dev_err(priv->adapter->dev,
- "link absent for peer %pM; cannot config\n", peer);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "link absent for peer %pM; cannot config\n", peer);
return -EINVAL;
}
@@ -988,8 +989,8 @@ mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer)
sta_ptr = mwifiex_get_sta_entry(priv, peer);
if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) {
- dev_dbg(priv->adapter->dev,
- "Setup already in progress for peer %pM\n", peer);
+ mwifiex_dbg(priv->adapter, WARN,
+ "Setup already in progress for peer %pM\n", peer);
return 0;
}
@@ -1046,8 +1047,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
sta_ptr = mwifiex_get_sta_entry(priv, peer);
if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) {
- dev_dbg(priv->adapter->dev,
- "tdls: enable link %pM success\n", peer);
+ mwifiex_dbg(priv->adapter, MSG,
+ "tdls: enable link %pM success\n", peer);
sta_ptr->tdls_status = TDLS_SETUP_COMPLETE;
@@ -1076,8 +1077,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
mwifiex_auto_tdls_update_peer_status(priv, peer,
TDLS_SETUP_COMPLETE);
} else {
- dev_dbg(priv->adapter->dev,
- "tdls: enable link %pM failed\n", peer);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "tdls: enable link %pM failed\n", peer);
if (sta_ptr) {
mwifiex_11n_cleanup_reorder_tbl(priv);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
@@ -1180,9 +1181,9 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
if (mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
HostCmd_ACT_GEN_SET, 0, &tdls_oper, false))
- dev_warn(priv->adapter->dev,
- "Disable link failed for TDLS peer %pM",
- sta_ptr->mac_addr);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Disable link failed for TDLS peer %pM",
+ sta_ptr->mac_addr);
}
mwifiex_del_all_sta_list(priv);
@@ -1204,9 +1205,9 @@ int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb)
(peer->failure_count <
MWIFIEX_TDLS_MAX_FAIL_COUNT)) {
peer->tdls_status = TDLS_SETUP_INPROGRESS;
- dev_dbg(priv->adapter->dev,
- "setup TDLS link, peer=%pM rssi=%d\n",
- peer->mac_addr, peer->rssi);
+ mwifiex_dbg(priv->adapter, INFO,
+ "setup TDLS link, peer=%pM rssi=%d\n",
+ peer->mac_addr, peer->rssi);
cfg80211_tdls_oper_request(priv->netdev,
peer->mac_addr,
@@ -1272,8 +1273,8 @@ void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac)
tdls_peer->rssi_jiffies = jiffies;
INIT_LIST_HEAD(&tdls_peer->list);
list_add_tail(&tdls_peer->list, &priv->auto_tdls_list);
- dev_dbg(priv->adapter->dev, "Add auto TDLS peer= %pM to list\n",
- mac);
+ mwifiex_dbg(priv->adapter, INFO,
+ "Add auto TDLS peer= %pM to list\n", mac);
}
spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
@@ -1341,8 +1342,8 @@ void mwifiex_check_auto_tdls(unsigned long context)
return;
if (!priv->auto_tdls_timer_active) {
- dev_dbg(priv->adapter->dev,
- "auto TDLS timer inactive; return");
+ mwifiex_dbg(priv->adapter, INFO,
+ "auto TDLS timer inactive; return");
return;
}
@@ -1368,9 +1369,9 @@ void mwifiex_check_auto_tdls(unsigned long context)
!tdls_peer->rssi) &&
tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
- dev_dbg(priv->adapter->dev,
- "teardown TDLS link,peer=%pM rssi=%d\n",
- tdls_peer->mac_addr, -tdls_peer->rssi);
+ mwifiex_dbg(priv->adapter, MSG,
+ "teardown TDLS link,peer=%pM rssi=%d\n",
+ tdls_peer->mac_addr, -tdls_peer->rssi);
tdls_peer->do_discover = true;
priv->check_tdls_tx = true;
cfg80211_tdls_oper_request(priv->netdev,
@@ -1384,9 +1385,10 @@ void mwifiex_check_auto_tdls(unsigned long context)
MWIFIEX_TDLS_MAX_FAIL_COUNT) {
priv->check_tdls_tx = true;
tdls_peer->do_setup = true;
- dev_dbg(priv->adapter->dev,
- "check TDLS with peer=%pM rssi=%d\n",
- tdls_peer->mac_addr, -tdls_peer->rssi);
+ mwifiex_dbg(priv->adapter, INFO,
+ "check TDLS with peer=%pM\t"
+ "rssi=%d\n", tdls_peer->mac_addr,
+ tdls_peer->rssi);
}
}
spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
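
The tailroom check flagged above ("Insufficient space while adding rates") reflects a general rule when building TDLS frames: reserve room for the IE payload plus its 2-byte element header before calling skb_put(). A generic sketch of appending one element safely; the helper itself is this sketch's invention, not a driver function:

```c
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Sketch: append one information element (ID, length, payload),
 * failing cleanly when the skb tail cannot hold header + payload. */
static int append_ie(struct sk_buff *skb, u8 eid, const u8 *data, u8 len)
{
	u8 *pos;

	if (skb_tailroom(skb) < len + 2)
		return -ENOMEM;		/* caller logs and bails out */

	pos = skb_put(skb, len + 2);
	pos[0] = eid;			/* element ID */
	pos[1] = len;			/* payload length */
	memcpy(pos + 2, data, len);
	return 0;
}
```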
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index a245f444aeec..28dcc84a34d2 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -50,11 +50,15 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
if (!priv) {
- dev_err(adapter->dev, "data: priv not found. Drop RX packet\n");
+ mwifiex_dbg(adapter, ERROR,
+ "data: priv not found. Drop RX packet\n");
dev_kfree_skb_any(skb);
return -1;
}
+ mwifiex_dbg_dump(adapter, DAT_D, "rx pkt:", skb->data,
+ min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));
+
memset(rx_info, 0, sizeof(*rx_info));
rx_info->bss_num = priv->bss_num;
rx_info->bss_type = priv->bss_type;
@@ -112,10 +116,12 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
skb, tx_param);
}
}
+ mwifiex_dbg_dump(adapter, DAT_D, "tx pkt:", skb->data,
+ min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));
switch (ret) {
case -ENOSR:
- dev_dbg(adapter->dev, "data: -ENOSR is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n");
break;
case -EBUSY:
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -124,13 +130,14 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
if (local_tx_pd)
local_tx_pd->flags = 0;
}
- dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
break;
case -1:
if (adapter->iface_type != MWIFIEX_PCIE)
adapter->data_sent = false;
- dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
- ret);
+ mwifiex_dbg(adapter, ERROR,
+ "mwifiex_write_data_async failed: 0x%X\n",
+ ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb, 0, ret);
break;
@@ -162,7 +169,8 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
tx_info->bss_type);
if (!priv) {
- dev_err(adapter->dev, "data: priv not found. Drop TX packet\n");
+ mwifiex_dbg(adapter, ERROR,
+ "data: priv not found. Drop TX packet\n");
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb, 0, 0);
return ret;
@@ -187,7 +195,7 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
}
switch (ret) {
case -ENOSR:
- dev_err(adapter->dev, "data: -ENOSR is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n");
break;
case -EBUSY:
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -202,13 +210,13 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
atomic_add(tx_info->aggr_num, &adapter->tx_queued);
else
atomic_inc(&adapter->tx_queued);
- dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
break;
case -1:
if (adapter->iface_type != MWIFIEX_PCIE)
adapter->data_sent = false;
- dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
- ret);
+ mwifiex_dbg(adapter, ERROR,
+ "mwifiex_write_data_async failed: 0x%X\n", ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb, 0, ret);
break;
@@ -319,7 +327,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
txq = netdev_get_tx_queue(priv->netdev, index);
if (netif_tx_queue_stopped(txq)) {
netif_tx_wake_queue(txq);
- dev_dbg(adapter->dev, "wake queue: %d\n", index);
+ mwifiex_dbg(adapter, DATA, "wake queue: %d\n", index);
}
}
done:
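
txrx.c also gains mwifiex_dbg_dump() calls that hex-dump each TX/RX frame, capped at DEBUG_DUMP_DATA_MAX_LEN so large frames do not flood the log. A sketch of a length-capped dump built on the stock kernel helper; the wrapper itself is illustrative:

```c
#include <linux/kernel.h>
#include <linux/printk.h>

/* Sketch: dump at most 'cap' bytes of a packet with offset prefixes. */
static void dump_pkt_capped(const char *prefix, const void *data,
			    size_t len, size_t cap)
{
	print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, data,
			     min(len, cap));
}
```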
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index f5c2af01ba0a..a4ae28353b6d 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -167,7 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
params->beacon.tail_len);
if (ht_ie) {
- memcpy(&bss_cfg->ht_cap, ht_ie,
+ memcpy(&bss_cfg->ht_cap, ht_ie + 2,
sizeof(struct ieee80211_ht_cap));
cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
memset(&bss_cfg->ht_cap.mcs, 0,
@@ -184,8 +184,8 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
break;
default:
- dev_warn(priv->adapter->dev,
- "Unsupported RX-STBC, default to 2x2\n");
+ mwifiex_dbg(priv->adapter, WARN,
+ "Unsupported RX-STBC, default to 2x2\n");
bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
break;
@@ -767,8 +767,8 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
break;
default:
- dev_err(priv->adapter->dev,
- "PREP_CMD: unknown cmd %#x\n", cmd_no);
+ mwifiex_dbg(priv->adapter, ERROR,
+ "PREP_CMD: unknown cmd %#x\n", cmd_no);
return -1;
}
@@ -806,24 +806,28 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg)
{
if (mwifiex_del_mgmt_ies(priv))
- dev_err(priv->adapter->dev, "Failed to delete mgmt IEs!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to delete mgmt IEs!\n");
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
HostCmd_ACT_GEN_SET, 0, NULL, true)) {
- dev_err(priv->adapter->dev, "Failed to stop the BSS\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to stop the BSS\n");
return -1;
}
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
HostCmd_ACT_GEN_SET,
UAP_BSS_PARAMS_I, bss_cfg, false)) {
- dev_err(priv->adapter->dev, "Failed to set the SSID\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to set the SSID\n");
return -1;
}
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
HostCmd_ACT_GEN_SET, 0, NULL, false)) {
- dev_err(priv->adapter->dev, "Failed to start the BSS\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to start the BSS\n");
return -1;
}
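
The memcpy change at the top of uap_cmd.c is a real bug fix, not a logging tweak: cfg80211_find_ie() returns a pointer to the element header (one ID byte plus one length byte), so copying the HT capabilities must start 2 bytes past it. A sketch of the corrected pattern; the length check is added here for illustration and is not part of the patch:

```c
#include <linux/ieee80211.h>
#include <linux/string.h>
#include <net/cfg80211.h>

/* Sketch: locate the HT capability element in a beacon tail and copy
 * its payload, skipping the 2-byte element header. */
static void copy_ht_cap_from_tail(struct ieee80211_ht_cap *dst,
				  const u8 *tail, size_t tail_len)
{
	const u8 *ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY,
					   tail, tail_len);

	if (ht_ie && ht_ie[1] >= sizeof(*dst))
		memcpy(dst, ht_ie + 2, sizeof(*dst));
}
```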
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index f4794cdc36d2..06ce3fe660f1 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -80,8 +80,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
node = mwifiex_add_sta_entry(priv, event->sta_addr);
if (!node) {
- dev_warn(adapter->dev,
- "could not create station entry!\n");
+ mwifiex_dbg(adapter, ERROR,
+ "could not create station entry!\n");
return -1;
}
@@ -128,7 +128,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
break;
case EVENT_UAP_BSS_START:
- dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+ mwifiex_dbg(adapter, EVENT,
+ "AP EVENT: event id: %#x\n", eventcause);
memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
ETH_ALEN);
if (priv->hist_data)
@@ -136,50 +137,53 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
break;
case EVENT_UAP_MIC_COUNTERMEASURES:
/* For future development */
- dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+ mwifiex_dbg(adapter, EVENT,
+ "AP EVENT: event id: %#x\n", eventcause);
break;
case EVENT_AMSDU_AGGR_CTRL:
ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
- dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+ mwifiex_dbg(adapter, EVENT,
+ "event: AMSDU_AGGR_CTRL %d\n", ctrl);
if (priv->media_connected) {
adapter->tx_buf_size =
min_t(u16, adapter->curr_tx_buf_size, ctrl);
- dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
- adapter->tx_buf_size);
+ mwifiex_dbg(adapter, EVENT,
+ "event: tx_buf_size %d\n",
+ adapter->tx_buf_size);
}
break;
case EVENT_ADDBA:
- dev_dbg(adapter->dev, "event: ADDBA Request\n");
+ mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
if (priv->media_connected)
mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
HostCmd_ACT_GEN_SET, 0,
adapter->event_body, false);
break;
case EVENT_DELBA:
- dev_dbg(adapter->dev, "event: DELBA Request\n");
+ mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n");
if (priv->media_connected)
mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
break;
case EVENT_BA_STREAM_TIEMOUT:
- dev_dbg(adapter->dev, "event: BA Stream timeout\n");
+ mwifiex_dbg(adapter, EVENT, "event: BA Stream timeout\n");
if (priv->media_connected) {
ba_timeout = (void *)adapter->event_body;
mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
}
break;
case EVENT_EXT_SCAN_REPORT:
- dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
if (adapter->ext_scan)
return mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);
break;
case EVENT_TX_STATUS_REPORT:
- dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
mwifiex_parse_tx_status_event(priv, adapter->event_body);
break;
case EVENT_PS_SLEEP:
- dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
+ mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n");
adapter->ps_state = PS_STATE_PRE_SLEEP;
@@ -187,12 +191,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
break;
case EVENT_PS_AWAKE:
- dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
+ mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
if (!adapter->pps_uapsd_mode &&
priv->media_connected && adapter->sleep_period.period) {
adapter->pps_uapsd_mode = true;
- dev_dbg(adapter->dev,
- "event: PPS/UAPSD mode activated\n");
+ mwifiex_dbg(adapter, EVENT,
+ "event: PPS/UAPSD mode activated\n");
}
adapter->tx_lock_flag = false;
if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
@@ -218,16 +222,16 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
break;
case EVENT_CHANNEL_REPORT_RDY:
- dev_dbg(adapter->dev, "event: Channel Report\n");
+ mwifiex_dbg(adapter, EVENT, "event: Channel Report\n");
mwifiex_11h_handle_chanrpt_ready(priv, adapter->event_skb);
break;
case EVENT_RADAR_DETECTED:
- dev_dbg(adapter->dev, "event: Radar detected\n");
+ mwifiex_dbg(adapter, EVENT, "event: Radar detected\n");
mwifiex_11h_handle_radar_detected(priv, adapter->event_skb);
break;
default:
- dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
- eventcause);
+ mwifiex_dbg(adapter, EVENT,
+ "event: unknown event id: %#x\n", eventcause);
break;
}
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 38ac4d74c486..61c52fdf945d 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -103,8 +103,8 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
if ((atomic_read(&adapter->pending_bridged_pkts) >=
MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
- dev_err(priv->adapter->dev,
- "Tx: Bridge packet limit reached. Drop packet!\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Tx: Bridge packet limit reached. Drop packet!\n");
kfree_skb(skb);
mwifiex_uap_cleanup_tx_queues(priv);
return;
@@ -153,15 +153,15 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
skb_pull(skb, hdr_chop);
if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
- dev_dbg(priv->adapter->dev,
- "data: Tx: insufficient skb headroom %d\n",
- skb_headroom(skb));
+ mwifiex_dbg(priv->adapter, ERROR,
+ "data: Tx: insufficient skb headroom %d\n",
+ skb_headroom(skb));
/* Insufficient skb headroom - allocate a new skb */
new_skb =
skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
if (unlikely(!new_skb)) {
- dev_err(priv->adapter->dev,
- "Tx: cannot allocate new_skb\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Tx: cannot allocate new_skb\n");
kfree_skb(skb);
priv->stats.tx_dropped++;
return;
@@ -169,8 +169,9 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
kfree_skb(skb);
skb = new_skb;
- dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
- skb_headroom(skb));
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: new skb headroom %d\n",
+ skb_headroom(skb));
}
tx_info = MWIFIEX_SKB_TXCB(skb);
@@ -225,7 +226,8 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
/* don't do packet forwarding in disconnected state */
if (!priv->media_connected) {
- dev_err(adapter->dev, "drop packet in disconnected state.\n");
+ mwifiex_dbg(adapter, ERROR,
+ "drop packet in disconnected state.\n");
dev_kfree_skb_any(skb);
return 0;
}
@@ -275,10 +277,10 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
- dev_err(adapter->dev,
- "wrong rx packet: len=%d, offset=%d, length=%d\n",
- skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
- le16_to_cpu(uap_rx_pd->rx_pkt_length));
+ mwifiex_dbg(adapter, ERROR,
+ "wrong rx packet: len=%d, offset=%d, length=%d\n",
+ skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
+ le16_to_cpu(uap_rx_pd->rx_pkt_length));
priv->stats.rx_dropped++;
dev_kfree_skb_any(skb);
return 0;
@@ -287,7 +289,8 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
if (rx_pkt_type == PKT_TYPE_MGMT) {
ret = mwifiex_process_mgmt_packet(priv, skb);
if (ret)
- dev_err(adapter->dev, "Rx of mgmt packet failed");
+ mwifiex_dbg(adapter, ERROR,
+ "Rx of mgmt packet failed");
dev_kfree_skb_any(skb);
return ret;
}
@@ -354,7 +357,8 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
INTF_HEADER_LEN;
if (!skb->len) {
- dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
+ mwifiex_dbg(adapter, ERROR,
+ "Tx: bad packet length: %d\n", skb->len);
tx_info->status_code = -1;
return skb->data;
}
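
The bridged-TX hunk above keeps the long-standing headroom dance: when a bridged skb lacks MWIFIEX_MIN_DATA_HEADER_LEN of headroom, it is reallocated with skb_realloc_headroom() and the original freed. A compact sketch of that pattern; the helper is illustrative:

```c
#include <linux/skbuff.h>

/* Sketch: guarantee 'need' bytes of headroom, consuming the original
 * skb; returns NULL if reallocation fails. */
static struct sk_buff *ensure_headroom(struct sk_buff *skb,
				       unsigned int need)
{
	struct sk_buff *new_skb;

	if (skb_headroom(skb) >= need)
		return skb;

	new_skb = skb_realloc_headroom(skb, need);
	kfree_skb(skb);			/* original is dropped either way */
	return new_skb;
}
```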
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index fd8027f200a0..aada93425f80 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -60,7 +60,6 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size);
static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
struct sk_buff *skb, u8 ep)
{
- struct device *dev = adapter->dev;
u32 recv_type;
__le32 tmp;
int ret;
@@ -69,13 +68,15 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
mwifiex_process_hs_config(adapter);
if (skb->len < INTF_HEADER_LEN) {
- dev_err(dev, "%s: invalid skb->len\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: invalid skb->len\n", __func__);
return -1;
}
switch (ep) {
case MWIFIEX_USB_EP_CMD_EVENT:
- dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__);
+ mwifiex_dbg(adapter, EVENT,
+ "%s: EP_CMD_EVENT\n", __func__);
skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
recv_type = le32_to_cpu(tmp);
skb_pull(skb, INTF_HEADER_LEN);
@@ -83,11 +84,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
switch (recv_type) {
case MWIFIEX_USB_TYPE_CMD:
if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
- dev_err(dev, "CMD: skb->len too large\n");
+ mwifiex_dbg(adapter, ERROR,
+ "CMD: skb->len too large\n");
ret = -1;
goto exit_restore_skb;
} else if (!adapter->curr_cmd) {
- dev_dbg(dev, "CMD: no curr_cmd\n");
+ mwifiex_dbg(adapter, WARN, "CMD: no curr_cmd\n");
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(
adapter, skb->data,
@@ -104,16 +106,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
break;
case MWIFIEX_USB_TYPE_EVENT:
if (skb->len < sizeof(u32)) {
- dev_err(dev, "EVENT: skb->len too small\n");
+ mwifiex_dbg(adapter, ERROR,
+ "EVENT: skb->len too small\n");
ret = -1;
goto exit_restore_skb;
}
skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
adapter->event_cause = le32_to_cpu(tmp);
- dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
+ mwifiex_dbg(adapter, EVENT,
+ "event_cause %#x\n", adapter->event_cause);
if (skb->len > MAX_EVENT_SIZE) {
- dev_err(dev, "EVENT: event body too large\n");
+ mwifiex_dbg(adapter, ERROR,
+ "EVENT: event body too large\n");
ret = -1;
goto exit_restore_skb;
}
@@ -125,14 +130,16 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
adapter->event_skb = skb;
break;
default:
- dev_err(dev, "unknown recv_type %#x\n", recv_type);
+ mwifiex_dbg(adapter, ERROR,
+ "unknown recv_type %#x\n", recv_type);
return -1;
}
break;
case MWIFIEX_USB_EP_DATA:
- dev_dbg(dev, "%s: EP_DATA\n", __func__);
+ mwifiex_dbg(adapter, DATA, "%s: EP_DATA\n", __func__);
if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
- dev_err(dev, "DATA: skb->len too large\n");
+ mwifiex_dbg(adapter, ERROR,
+ "DATA: skb->len too large\n");
return -1;
}
@@ -141,7 +148,8 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
atomic_inc(&adapter->rx_pending);
break;
default:
- dev_err(dev, "%s: unknown endport %#x\n", __func__, ep);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: unknown endport %#x\n", __func__, ep);
return -1;
}
@@ -176,8 +184,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
if (recv_length) {
if (urb->status || (adapter->surprise_removed)) {
- dev_err(adapter->dev,
- "URB status is failed: %d\n", urb->status);
+ mwifiex_dbg(adapter, ERROR,
+ "URB status is failed: %d\n", urb->status);
/* Do not free skb in case of command ep */
if (card->rx_cmd_ep != context->ep)
dev_kfree_skb_any(skb);
@@ -190,8 +198,9 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
status = mwifiex_usb_recv(adapter, skb, context->ep);
- dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
- recv_length, status);
+ mwifiex_dbg(adapter, INFO,
+ "info: recv_length=%d, status=%d\n",
+ recv_length, status);
if (status == -EINPROGRESS) {
mwifiex_queue_main_work(adapter);
@@ -203,8 +212,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
return;
} else {
if (status == -1)
- dev_err(adapter->dev,
- "received data processing failed!\n");
+ mwifiex_dbg(adapter, ERROR,
+ "received data processing failed!\n");
/* Do not free skb in case of command ep */
if (card->rx_cmd_ep != context->ep)
@@ -212,8 +221,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
}
} else if (urb->status) {
if (!adapter->is_suspended) {
- dev_warn(adapter->dev,
- "Card is removed: %d\n", urb->status);
+ mwifiex_dbg(adapter, FATAL,
+ "Card is removed: %d\n", urb->status);
adapter->surprise_removed = true;
}
dev_kfree_skb_any(skb);
@@ -249,14 +258,17 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
struct mwifiex_adapter *adapter = context->adapter;
struct usb_card_rec *card = adapter->card;
- dev_dbg(adapter->dev, "%s: status: %d\n", __func__, urb->status);
+ mwifiex_dbg(adapter, INFO,
+ "%s: status: %d\n", __func__, urb->status);
if (context->ep == card->tx_cmd_ep) {
- dev_dbg(adapter->dev, "%s: CMD\n", __func__);
+ mwifiex_dbg(adapter, CMD,
+ "%s: CMD\n", __func__);
atomic_dec(&card->tx_cmd_urb_pending);
adapter->cmd_sent = false;
} else {
- dev_dbg(adapter->dev, "%s: DATA\n", __func__);
+ mwifiex_dbg(adapter, DATA,
+ "%s: DATA\n", __func__);
atomic_dec(&card->tx_data_urb_pending);
mwifiex_write_data_complete(adapter, context->skb, 0,
urb->status ? -1 : 0);
@@ -275,8 +287,8 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
if (card->rx_cmd_ep != ctx->ep) {
ctx->skb = dev_alloc_skb(size);
if (!ctx->skb) {
- dev_err(adapter->dev,
- "%s: dev_alloc_skb failed\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: dev_alloc_skb failed\n", __func__);
return -ENOMEM;
}
}
@@ -291,7 +303,7 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
atomic_inc(&card->rx_data_urb_pending);
if (usb_submit_urb(ctx->urb, GFP_ATOMIC)) {
- dev_err(adapter->dev, "usb_submit_urb failed\n");
+ mwifiex_dbg(adapter, ERROR, "usb_submit_urb failed\n");
dev_kfree_skb_any(ctx->skb);
ctx->skb = NULL;
@@ -468,7 +480,8 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
adapter = card->adapter;
if (unlikely(adapter->is_suspended))
- dev_warn(adapter->dev, "Device already suspended\n");
+ mwifiex_dbg(adapter, WARN,
+ "Device already suspended\n");
mwifiex_enable_hs(adapter);
@@ -519,7 +532,8 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
adapter = card->adapter;
if (unlikely(!adapter->is_suspended)) {
- dev_warn(adapter->dev, "Device already resumed\n");
+ mwifiex_dbg(adapter, WARN,
+ "Device already resumed\n");
return 0;
}
@@ -578,7 +592,8 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
mwifiex_usb_free(card);
- dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+ mwifiex_dbg(adapter, FATAL,
+ "%s: removing card\n", __func__);
mwifiex_remove_card(adapter, &add_remove_card_sem);
usb_set_intfdata(intf, NULL);
@@ -608,7 +623,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
if (!card->tx_cmd.urb) {
- dev_err(adapter->dev, "tx_cmd.urb allocation failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "tx_cmd.urb allocation failed\n");
return -ENOMEM;
}
@@ -620,8 +636,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
card->tx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
if (!card->tx_data_list[i].urb) {
- dev_err(adapter->dev,
- "tx_data_list[] urb allocation failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "tx_data_list[] urb allocation failed\n");
return -ENOMEM;
}
}
@@ -639,15 +655,13 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
if (!card->rx_cmd.urb) {
- dev_err(adapter->dev, "rx_cmd.urb allocation failed\n");
+ mwifiex_dbg(adapter, ERROR, "rx_cmd.urb allocation failed\n");
return -ENOMEM;
}
card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
- if (!card->rx_cmd.skb) {
- dev_err(adapter->dev, "rx_cmd.skb allocation failed\n");
+ if (!card->rx_cmd.skb)
return -ENOMEM;
- }
if (mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE))
return -1;
@@ -658,8 +672,8 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
if (!card->rx_data_list[i].urb) {
- dev_err(adapter->dev,
- "rx_data_list[] urb allocation failed\n");
+ mwifiex_dbg(adapter, ERROR,
+ "rx_data_list[] urb allocation failed\n");
return -1;
}
if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
@@ -683,7 +697,8 @@ static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
ret = usb_bulk_msg(card->udev, usb_sndbulkpipe(card->udev, ep), pbuf,
*len, &actual_length, timeout);
if (ret) {
- dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "usb_bulk_msg for tx failed: %d\n", ret);
return ret;
}
@@ -702,7 +717,8 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
ret = usb_bulk_msg(card->udev, usb_rcvbulkpipe(card->udev, ep), pbuf,
*len, &actual_length, timeout);
if (ret) {
- dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "usb_bulk_msg for rx failed: %d\n", ret);
return ret;
}
@@ -722,13 +738,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
struct urb *tx_urb;
if (adapter->is_suspended) {
- dev_err(adapter->dev,
- "%s: not allowed while suspended\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: not allowed while suspended\n", __func__);
return -1;
}
if (adapter->surprise_removed) {
- dev_err(adapter->dev, "%s: device removed\n", __func__);
+ mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
return -1;
}
@@ -737,7 +753,7 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
return -EBUSY;
}
- dev_dbg(adapter->dev, "%s: ep=%d\n", __func__, ep);
+ mwifiex_dbg(adapter, INFO, "%s: ep=%d\n", __func__, ep);
if (ep == card->tx_cmd_ep) {
context = &card->tx_cmd;
@@ -764,7 +780,8 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
atomic_inc(&card->tx_data_urb_pending);
if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
- dev_err(adapter->dev, "%s: usb_submit_urb failed\n", __func__);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: usb_submit_urb failed\n", __func__);
if (ep == card->tx_cmd_ep) {
atomic_dec(&card->tx_cmd_urb_pending);
} else {
@@ -843,8 +860,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
u8 check_winner = 1;
if (!firmware) {
- dev_err(adapter->dev,
- "No firmware image found! Terminating download\n");
+ mwifiex_dbg(adapter, ERROR,
+ "No firmware image found! Terminating download\n");
ret = -1;
goto fw_exit;
}
@@ -889,8 +906,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
MWIFIEX_USB_EP_CMD_EVENT,
MWIFIEX_USB_TIMEOUT);
if (ret) {
- dev_err(adapter->dev,
- "write_data_sync: failed: %d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "write_data_sync: failed: %d\n",
+ ret);
continue;
}
@@ -902,8 +920,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
MWIFIEX_USB_EP_CMD_EVENT,
MWIFIEX_USB_TIMEOUT);
if (ret) {
- dev_err(adapter->dev,
- "read_data_sync: failed: %d\n", ret);
+ mwifiex_dbg(adapter, ERROR,
+ "read_data_sync: failed: %d\n",
+ ret);
continue;
}
@@ -913,17 +932,17 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
/* check 1st firmware block resp for highest bit set */
if (check_winner) {
if (le32_to_cpu(sync_fw.cmd) & 0x80000000) {
- dev_warn(adapter->dev,
- "USB is not the winner %#x\n",
- sync_fw.cmd);
+ mwifiex_dbg(adapter, WARN,
+ "USB is not the winner %#x\n",
+ sync_fw.cmd);
/* returning success */
ret = 0;
goto cleanup;
}
- dev_dbg(adapter->dev,
- "USB is the winner, start to download FW\n");
+ mwifiex_dbg(adapter, MSG,
+ "start to download FW...\n");
check_winner = 0;
break;
@@ -931,9 +950,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
/* check the firmware block response for CRC errors */
if (sync_fw.cmd) {
- dev_err(adapter->dev,
- "FW received block with CRC %#x\n",
- sync_fw.cmd);
+ mwifiex_dbg(adapter, ERROR,
+ "FW received block with CRC %#x\n",
+ sync_fw.cmd);
ret = -1;
continue;
}
@@ -945,8 +964,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
} while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries);
cleanup:
- dev_notice(adapter->dev,
- "info: FW download over, size %d bytes\n", tlen);
+ mwifiex_dbg(adapter, MSG,
+ "info: FW download over, size %d bytes\n", tlen);
kfree(recv_buff);
kfree(fwdata);
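
The hunks above convert mwifiex's USB path from dev_err()/dev_dbg() to the adapter-scoped mwifiex_dbg(adapter, LEVEL, ...) helper, which gates each message class (ERROR, WARN, INFO, EVENT, CMD, DATA, MSG, FATAL) on the runtime debug_mask that the util.c hunk below exposes through the driver's debug items. A minimal userspace sketch of that pattern follows; the mask values, the struct layout, and the _mwifiex_dbg body here are illustrative assumptions, not the in-tree definitions.

#include <stdarg.h>
#include <stdio.h>

enum {					/* illustrative mask bits */
	MWIFIEX_DBG_ERROR = 1 << 0,
	MWIFIEX_DBG_WARN  = 1 << 1,
	MWIFIEX_DBG_INFO  = 1 << 2,
	MWIFIEX_DBG_EVENT = 1 << 3,
};

struct mwifiex_adapter {
	unsigned int debug_mask;	/* runtime-tunable message filter */
};

static void _mwifiex_dbg(const struct mwifiex_adapter *adapter,
			 unsigned int mask, const char *fmt, ...)
{
	va_list args;

	if (!(adapter->debug_mask & mask))
		return;			/* this message class is disabled */

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);	/* stands in for the kernel's dev_info() */
	va_end(args);
}

/* Token-paste the level so call sites read mwifiex_dbg(adapter, ERROR, ...) */
#define mwifiex_dbg(adapter, mask, fmt, ...) \
	_mwifiex_dbg(adapter, MWIFIEX_DBG_##mask, fmt, ##__VA_ARGS__)
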
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index b8a45872354d..370323a47ecb 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -26,6 +26,8 @@
#include "11n.h"
static struct mwifiex_debug_data items[] = {
+ {"debug_mask", item_size(debug_mask),
+ item_addr(debug_mask), 1},
{"int_counter", item_size(int_counter),
item_addr(int_counter), 1},
{"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]),
@@ -158,7 +160,8 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
} else if (func_init_shutdown == MWIFIEX_FUNC_SHUTDOWN) {
cmd = HostCmd_CMD_FUNC_SHUTDOWN;
} else {
- dev_err(priv->adapter->dev, "unsupported parameter\n");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "unsupported parameter\n");
return -1;
}
@@ -178,6 +181,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
if (info) {
+ info->debug_mask = adapter->debug_mask;
memcpy(info->packets_out,
priv->wmm.packets_out,
sizeof(priv->wmm.packets_out));
@@ -336,9 +340,9 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
action_code = *(payload + sizeof(struct ieee80211_hdr) + 1);
if (category == WLAN_CATEGORY_PUBLIC &&
action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
- dev_dbg(priv->adapter->dev,
- "TDLS discovery response %pM nf=%d, snr=%d\n",
- ieee_hdr->addr2, rx_pd->nf, rx_pd->snr);
+ mwifiex_dbg(priv->adapter, INFO,
+ "TDLS discovery response %pM nf=%d, snr=%d\n",
+ ieee_hdr->addr2, rx_pd->nf, rx_pd->snr);
mwifiex_auto_tdls_update_peer_signal(priv,
ieee_hdr->addr2,
rx_pd->snr,
@@ -346,8 +350,8 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
}
break;
default:
- dev_dbg(priv->adapter->dev,
- "unknown mgmt frame subytpe %#x\n", stype);
+ mwifiex_dbg(priv->adapter, INFO,
+ "unknown mgmt frame subtype %#x\n", stype);
}
return 0;
@@ -369,8 +373,8 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
if (!priv->mgmt_frame_mask ||
priv->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) {
- dev_dbg(priv->adapter->dev,
- "do not receive mgmt frames on uninitialized intf");
+ mwifiex_dbg(priv->adapter, ERROR,
+ "do not receive mgmt frames on uninitialized intf");
return -1;
}
@@ -464,13 +468,14 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
- dev_dbg(adapter->dev, "cmd completed: status=%d\n",
- adapter->cmd_wait_q.status);
+ mwifiex_dbg(adapter, CMD,
+ "cmd completed: status=%d\n",
+ adapter->cmd_wait_q.status);
*(cmd_node->condition) = true;
if (adapter->cmd_wait_q.status == -ETIMEDOUT)
- dev_err(adapter->dev, "cmd timeout\n");
+ mwifiex_dbg(adapter, ERROR, "cmd timeout\n");
else
wake_up_interruptible(&adapter->cmd_wait_q.wait);
@@ -536,13 +541,16 @@ void
mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
int ies_len, struct mwifiex_sta_node *node)
{
+ struct ieee_types_header *ht_cap_ie;
const struct ieee80211_ht_cap *ht_cap;
if (!ies)
return;
- ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
- if (ht_cap) {
+ ht_cap_ie = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies,
+ ies_len);
+ if (ht_cap_ie) {
+ ht_cap = (void *)(ht_cap_ie + 1);
node->is_11n_enabled = 1;
node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
IEEE80211_HT_CAP_MAX_AMSDU ?
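
The mwifiex_set_sta_ht_cap() hunk above fixes a pointer bug: cfg80211_find_ie() returns a pointer to the element header (the id and length bytes), so the ieee80211_ht_cap payload only begins one header past it, which is what the new (ht_cap_ie + 1) computes. A short sketch of the layout, with an illustrative helper name:

#include <stdint.h>

struct ieee_types_header {	/* 802.11 information-element header */
	uint8_t element_id;
	uint8_t len;
} __attribute__((packed));

/* The IE payload starts immediately after the 2-byte header;
 * pointer arithmetic on the header type skips exactly that much. */
static inline const void *ie_payload(const struct ieee_types_header *ie)
{
	return ie + 1;
}
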
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index b2e99569a0f8..a8ea21c3340c 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -107,7 +107,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
ra_list->total_pkt_count = 0;
- dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
+ mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list);
return ra_list;
}
@@ -150,7 +150,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
- dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);
+ mwifiex_dbg(adapter, INFO,
+ "info: created ra_list %p\n", ra_list);
if (!ra_list)
break;
@@ -178,8 +179,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
- dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
- ra_list, ra_list->is_11n_enabled);
+ mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
+ ra_list, ra_list->is_11n_enabled);
if (ra_list->is_11n_enabled) {
ra_list->ba_pkt_count = 0;
@@ -241,11 +242,12 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
return;
}
- dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
- "qos_info Parameter Set Count=%d, Reserved=%#x\n",
- wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
- IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
- wmm_ie->reserved);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM Parameter IE: version=%d,\t"
+ "qos_info Parameter Set Count=%d, Reserved=%#x\n",
+ wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
+ IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
+ wmm_ie->reserved);
for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
@@ -257,10 +259,10 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
priv->wmm.queue_priority[ac_idx] = ac_idx;
tmp[ac_idx] = avg_back_off;
- dev_dbg(priv->adapter->dev,
- "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
- (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
- cw_min, avg_back_off);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
+ (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
+ cw_min, avg_back_off);
mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
}
@@ -333,8 +335,8 @@ mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
{
int ac_val;
- dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities:"
- "BK(0), BE(1), VI(2), VO(3)\n");
+ mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t"
+ "BK(0), BE(1), VI(2), VO(3)\n");
if (!priv->wmm_enabled) {
/* WMM is not enabled, default priorities */
@@ -346,9 +348,10 @@ mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
priv->wmm.ac_down_graded_vals[ac_val]
= mwifiex_wmm_eval_downgrade_ac(priv,
(enum mwifiex_wmm_ac_e) ac_val);
- dev_dbg(priv->adapter->dev,
- "info: WMM: AC PRIO %d maps to %d\n",
- ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM: AC PRIO %d maps to %d\n",
+ ac_val,
+ priv->wmm.ac_down_graded_vals[ac_val]);
}
}
}
@@ -428,6 +431,15 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
priv->tos_to_tid_inv[i];
}
+ priv->aggr_prio_tbl[6].amsdu
+ = priv->aggr_prio_tbl[6].ampdu_ap
+ = priv->aggr_prio_tbl[6].ampdu_user
+ = BA_STREAM_NOT_ALLOWED;
+
+ priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
+ = priv->aggr_prio_tbl[7].ampdu_user
+ = BA_STREAM_NOT_ALLOWED;
+
mwifiex_set_ba_params(priv);
mwifiex_reset_11n_rx_seq_num(priv);
@@ -512,8 +524,8 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
int i;
for (i = 0; i < MAX_NUM_TID; ++i) {
- dev_dbg(priv->adapter->dev,
- "info: ra_list: freeing buf for tid %d\n", i);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: ra_list: freeing buf for tid %d\n", i);
list_for_each_entry_safe(ra_list, tmp_node,
&priv->wmm.tid_tbl_ptr[i].ra_list,
list) {
@@ -685,14 +697,15 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
- dev_dbg(adapter->dev,
- "TDLS setup packet for %pM. Don't block\n", ra);
+ mwifiex_dbg(adapter, DATA,
+ "TDLS setup packet for %pM.\t"
+ "Don't block\n", ra);
else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
tdls_status = mwifiex_get_tdls_link_status(priv, ra);
}
if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
- dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
+ mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n");
mwifiex_write_data_complete(adapter, skb, 0, -1);
return;
}
@@ -773,6 +786,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
{
u8 *curr = (u8 *) &resp->params.get_wmm_status;
uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
+ int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
bool valid = true;
struct mwifiex_ie_types_data *tlv_hdr;
@@ -780,8 +794,9 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
struct mwifiex_wmm_ac_status *ac_status;
- dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
- resp_len);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
+ resp_len);
while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
tlv_hdr = (struct mwifiex_ie_types_data *) curr;
@@ -795,12 +810,12 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
tlv_wmm_qstatus =
(struct mwifiex_ie_types_wmm_queue_status *)
tlv_hdr;
- dev_dbg(priv->adapter->dev,
- "info: CMD_RESP: WMM_GET_STATUS:"
- " QSTATUS TLV: %d, %d, %d\n",
- tlv_wmm_qstatus->queue_index,
- tlv_wmm_qstatus->flow_required,
- tlv_wmm_qstatus->disabled);
+ mwifiex_dbg(priv->adapter, CMD,
+ "info: CMD_RESP: WMM_GET_STATUS:\t"
+ "QSTATUS TLV: %d, %d, %d\n",
+ tlv_wmm_qstatus->queue_index,
+ tlv_wmm_qstatus->flow_required,
+ tlv_wmm_qstatus->disabled);
ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
queue_index];
@@ -823,11 +838,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
wmm_param_ie->vend_hdr.element_id =
WLAN_EID_VENDOR_SPECIFIC;
- dev_dbg(priv->adapter->dev,
- "info: CMD_RESP: WMM_GET_STATUS:"
- " WMM Parameter Set Count: %d\n",
- wmm_param_ie->qos_info_bitmap &
- IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);
+ mwifiex_dbg(priv->adapter, CMD,
+ "info: CMD_RESP: WMM_GET_STATUS:\t"
+ "WMM Parameter Set Count: %d\n",
+ wmm_param_ie->qos_info_bitmap & mask);
memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
wmm_ie, wmm_param_ie,
@@ -875,9 +889,9 @@ mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
if (!wmm_ie)
return 0;
- dev_dbg(priv->adapter->dev,
- "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
- wmm_ie->vend_hdr.element_id);
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
+ wmm_ie->vend_hdr.element_id);
if ((priv->wmm_required ||
(ht_cap && (priv->adapter->config_bands & BAND_GN ||
@@ -927,8 +941,8 @@ mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
*/
ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);
- dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
- " %d ms sent to FW\n", queue_delay, ret_val);
+ mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t"
+ "%d ms sent to FW\n", queue_delay, ret_val);
return ret_val;
}
@@ -1082,14 +1096,15 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
if (skb_queue_empty(&ptr->skb_head)) {
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
- dev_dbg(adapter->dev, "data: nothing to send\n");
+ mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
return;
}
skb = skb_dequeue(&ptr->skb_head);
tx_info = MWIFIEX_SKB_TXCB(skb);
- dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
+ mwifiex_dbg(adapter, DATA,
+ "data: dequeuing the packet %p %p\n", ptr, skb);
ptr->total_pkt_count--;
@@ -1205,7 +1220,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
switch (ret) {
case -EBUSY:
- dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+ mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
@@ -1224,7 +1239,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
case -1:
if (adapter->iface_type != MWIFIEX_PCIE)
adapter->data_sent = false;
- dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
+ mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
adapter->dbg.num_tx_host_to_card_failure++;
mwifiex_write_data_complete(adapter, skb, 0, ret);
break;
@@ -1263,7 +1278,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
tid = mwifiex_get_tid(ptr);
- dev_dbg(adapter->dev, "data: tid=%d\n", tid);
+ mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 95921167b53f..77361af68b18 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2380,7 +2380,7 @@ mwl8k_set_ht_caps(struct ieee80211_hw *hw,
if (cap & MWL8K_CAP_GREENFIELD)
band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
if (cap & MWL8K_CAP_AMPDU) {
- hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
}
@@ -5192,7 +5192,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
priv->sniffer_enabled = true;
}
- *total_flags &= FIF_PROMISC_IN_BSS | FIF_ALLMULTI |
+ *total_flags &= FIF_ALLMULTI |
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL |
FIF_OTHER_BSS;
@@ -5431,7 +5431,7 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 *addr = sta->addr, idx;
struct mwl8k_sta *sta_info = MWL8K_STA(sta);
- if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
+ if (!ieee80211_hw_check(hw, AMPDU_AGGREGATION))
return -ENOTSUPP;
spin_lock(&priv->stream_lock);
@@ -6076,14 +6076,15 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
hw->queues = MWL8K_TX_WMM_QUEUES;
/* Set rssi values to dBm */
- hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
/*
* Ask mac80211 to not to trigger PS mode
* based on PM bit of incoming frames.
*/
if (priv->ap_fw)
- hw->flags |= IEEE80211_HW_AP_LINK_PS;
+ ieee80211_hw_set(hw, AP_LINK_PS);
hw->vif_data_size = sizeof(struct mwl8k_vif);
hw->sta_data_size = sizeof(struct mwl8k_sta);
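
From this point on, many hunks replace assignments like hw->flags |= IEEE80211_HW_SIGNAL_DBM with ieee80211_hw_set(hw, SIGNAL_DBM), and flag tests with ieee80211_hw_check(): mac80211's hardware capability flags outgrew a single integer, so they now live in a bitmap indexed by enum values instead of OR-able masks. A simplified userspace sketch of such helpers (the real mac80211 versions are built on the kernel's bitmap primitives; everything below beyond the two macro names is an assumption):

#include <limits.h>
#include <stdbool.h>

enum ieee80211_hw_flags {		/* abridged flag list */
	IEEE80211_HW_SIGNAL_DBM,
	IEEE80211_HW_HAS_RATE_CONTROL,
	IEEE80211_HW_AMPDU_AGGREGATION,
	NUM_IEEE80211_HW_FLAGS
};

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define FLAG_WORDS	((NUM_IEEE80211_HW_FLAGS + BITS_PER_LONG - 1) / \
			 BITS_PER_LONG)

struct ieee80211_hw {
	unsigned long flags[FLAG_WORDS];	/* bitmap, not a u32 */
};

static void _ieee80211_hw_set(struct ieee80211_hw *hw, unsigned int flg)
{
	hw->flags[flg / BITS_PER_LONG] |= 1UL << (flg % BITS_PER_LONG);
}

static bool _ieee80211_hw_check(const struct ieee80211_hw *hw,
				unsigned int flg)
{
	return hw->flags[flg / BITS_PER_LONG] & (1UL << (flg % BITS_PER_LONG));
}

#define ieee80211_hw_set(hw, flg)   _ieee80211_hw_set(hw, IEEE80211_HW_##flg)
#define ieee80211_hw_check(hw, flg) _ieee80211_hw_check(hw, IEEE80211_HW_##flg)
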
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 275408eaf95e..257a9eadd595 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -351,8 +351,7 @@ int p54_setup_mac(struct p54_common *priv)
* "TRANSPARENT and PROMISCUOUS are mutually exclusive"
* STSW45X0C LMAC API - page 12
*/
- if (((priv->filter_flags & FIF_PROMISC_IN_BSS) ||
- (priv->filter_flags & FIF_OTHER_BSS)) &&
+ if (priv->filter_flags & FIF_OTHER_BSS &&
(mode != P54_FILTER_TYPE_PROMISCUOUS))
mode |= P54_FILTER_TYPE_TRANSPARENT;
} else {
diff --git a/drivers/net/wireless/p54/led.c b/drivers/net/wireless/p54/led.c
index 1f6fd5ff5531..9a8fedd3c0f5 100644
--- a/drivers/net/wireless/p54/led.c
+++ b/drivers/net/wireless/p54/led.c
@@ -83,7 +83,7 @@ static void p54_led_brightness_set(struct led_classdev *led_dev,
static int p54_register_led(struct p54_common *priv,
unsigned int led_index,
- char *name, char *trigger)
+ char *name, const char *trigger)
{
struct p54_led_dev *led = &priv->leds[led_index];
int err;
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index e79674f73dc5..7805864e76f9 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -395,13 +395,11 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
{
struct p54_common *priv = dev->priv;
- *total_flags &= FIF_PROMISC_IN_BSS |
- FIF_ALLMULTI |
- FIF_OTHER_BSS;
+ *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS;
priv->filter_flags = *total_flags;
- if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
+ if (changed_flags & FIF_OTHER_BSS)
p54_setup_mac(priv);
if (changed_flags & FIF_ALLMULTI || multicast)
@@ -748,12 +746,12 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
spin_lock_init(&priv->tx_stats_lock);
skb_queue_head_init(&priv->tx_queue);
skb_queue_head_init(&priv->tx_pending);
- dev->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+ ieee80211_hw_set(dev, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(dev, MFP_CAPABLE);
+ ieee80211_hw_set(dev, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(dev, SUPPORTS_PS);
+ ieee80211_hw_set(dev, RX_INCLUDES_FCS);
+ ieee80211_hw_set(dev, SIGNAL_DBM);
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 477f86354dc5..0881ba8535f4 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -143,7 +143,7 @@ static int psm;
static char *essid;
/* Default to encapsulation unless translation requested */
-static bool translate = 1;
+static bool translate = true;
static int country = USA;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index d72ff8e7125d..71a825c750cf 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -356,9 +356,9 @@ struct ndis_80211_pmkid {
#define CAP_MODE_80211G 4
#define CAP_MODE_MASK 7
-#define WORK_LINK_UP (1<<0)
-#define WORK_LINK_DOWN (1<<1)
-#define WORK_SET_MULTICAST_LIST (1<<2)
+#define WORK_LINK_UP 0
+#define WORK_LINK_DOWN 1
+#define WORK_SET_MULTICAST_LIST 2
#define RNDIS_WLAN_ALG_NONE 0
#define RNDIS_WLAN_ALG_WEP (1<<0)
@@ -2861,7 +2861,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev)
deauthenticate(usbdev);
- cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL);
+ cfg80211_disconnected(usbdev->net, 0, NULL, 0, true, GFP_KERNEL);
}
netif_carrier_off(usbdev->net);
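
The WORK_* change above swaps shifted masks for plain bit numbers, the convention expected when a pending-work word is driven by bit helpers such as set_bit() and test_and_clear_bit(). A userspace sketch of the idea, using open-coded bit arithmetic in place of the kernel helpers (the work_pending field name is an assumption):

#include <stdbool.h>

#define WORK_LINK_UP	0		/* bit numbers, not masks */
#define WORK_LINK_DOWN	1

struct rndis_wlan_private {
	unsigned long work_pending;	/* illustrative field name */
};

static void queue_link_down(struct rndis_wlan_private *priv)
{
	/* kernel code would use set_bit(WORK_LINK_DOWN, &priv->work_pending) */
	priv->work_pending |= 1UL << WORK_LINK_DOWN;
}

static bool take_link_down(struct rndis_wlan_private *priv)
{
	/* kernel code would use test_and_clear_bit() */
	bool pending = priv->work_pending & (1UL << WORK_LINK_DOWN);

	priv->work_pending &= ~(1UL << WORK_LINK_DOWN);
	return pending;
}
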
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index aeaf87bb5518..7e804324bfa7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1062,10 +1062,9 @@ int rsi_mac80211_attach(struct rsi_common *common)
hw->priv = adapter;
adapter->hw = hw;
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_HAS_RATE_CONTROL |
- IEEE80211_HW_AMPDU_AGGREGATION |
- 0;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
hw->queues = MAX_HW_QUEUES;
hw->extra_tx_headroom = RSI_NEEDED_HEADROOM;
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index bdf5590ba304..9a3966cd6fbe 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -273,10 +273,8 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
!(filter_flags & FIF_CONTROL));
- rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
- !(filter_flags & FIF_PROMISC_IN_BSS) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
@@ -1576,10 +1574,10 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all hw fields.
*/
- rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK;
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+ ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 79f4fe65a119..1a6740b4d396 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -274,10 +274,8 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
!(filter_flags & FIF_CONTROL));
- rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
- !(filter_flags & FIF_PROMISC_IN_BSS) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,
@@ -1871,10 +1869,10 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all hw fields.
*/
- rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK;
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+ ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 05c64597838d..b50d873145d5 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -434,10 +434,8 @@ static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
!(filter_flags & FIF_CONTROL));
- rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, 1);
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
- !(filter_flags & FIF_PROMISC_IN_BSS) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
@@ -1698,11 +1696,10 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
* multicast and broadcast traffic immediately instead of buffering it
infinitely and thus dropping it after some time.
*/
- rt2x00dev->hw->flags =
- IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK;
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+ ieee80211_hw_set(rt2x00dev->hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
/*
* Disable powersaving as default.
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index be2d54f257b1..0bc5ac56f283 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1513,8 +1513,7 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_FCSFAIL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
!(filter_flags & FIF_PLCPFAIL));
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME, 1);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
@@ -7498,13 +7497,12 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all hw fields.
*/
- rt2x00dev->hw->flags =
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(rt2x00dev->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(rt2x00dev->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
/*
* Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
@@ -7514,8 +7512,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
* infinitely and thus dropping it after some time.
*/
if (!rt2x00_is_usb(rt2x00dev))
- rt2x00dev->hw->flags |=
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -7818,21 +7815,25 @@ EXPORT_SYMBOL_GPL(rt2800_probe_hw);
/*
* IEEE80211 stack callback functions.
*/
-void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
- u16 *iv16)
+void rt2800_get_key_seq(struct ieee80211_hw *hw,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct mac_iveiv_entry iveiv_entry;
u32 offset;
- offset = MAC_IVEIV_ENTRY(hw_key_idx);
+ if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
+ return;
+
+ offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
rt2800_register_multiread(rt2x00dev, offset,
&iveiv_entry, sizeof(iveiv_entry));
- memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
- memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
+ memcpy(&seq->tkip.iv16, &iveiv_entry.iv[0], 2);
+ memcpy(&seq->tkip.iv32, &iveiv_entry.iv[4], 4);
}
-EXPORT_SYMBOL_GPL(rt2800_get_tkip_seq);
+EXPORT_SYMBOL_GPL(rt2800_get_key_seq);
int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
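
The rt2800 conversion above retires the TKIP-only get_tkip_seq callback in favor of the generic get_key_seq, which fills a cipher-agnostic ieee80211_key_seq; the 2- and 4-byte memcpy calls land in its tkip.iv16 and tkip.iv32 fields. A sketch of that container and the read-back step (field layout abridged, not the mac80211 header verbatim):

#include <stdint.h>
#include <string.h>

struct ieee80211_key_seq {		/* abridged */
	union {
		struct {
			uint32_t iv32;
			uint16_t iv16;
		} tkip;
		struct {
			uint8_t pn[6];
		} ccmp;
	};
};

/* Split the raw 8-byte IV/EIV entry read back from hardware into the
 * 16-bit and 32-bit TKIP counters, as the hunk above does. */
static void fill_tkip_seq(struct ieee80211_key_seq *seq, const uint8_t iv[8])
{
	memcpy(&seq->tkip.iv16, &iv[0], sizeof(seq->tkip.iv16));
	memcpy(&seq->tkip.iv32, &iv[4], sizeof(seq->tkip.iv32));
}
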
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 3019db637a4b..1609b8a7f7eb 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -209,8 +209,9 @@ int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev);
-void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
- u16 *iv16);
+void rt2800_get_key_seq(struct ieee80211_hw *hw,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq);
int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
int rt2800_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue_idx,
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index cc1b3cc73c5a..0af22573a2eb 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -309,7 +309,7 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
.sw_scan_start = rt2x00mac_sw_scan_start,
.sw_scan_complete = rt2x00mac_sw_scan_complete,
.get_stats = rt2x00mac_get_stats,
- .get_tkip_seq = rt2800_get_tkip_seq,
+ .get_key_seq = rt2800_get_key_seq,
.set_rts_threshold = rt2800_set_rts_threshold,
.sta_add = rt2x00mac_sta_add,
.sta_remove = rt2x00mac_sta_remove,
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
index aaa7aa4cad9d..a985a5a7945e 100644
--- a/drivers/net/wireless/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/rt2x00/rt2800soc.c
@@ -148,7 +148,7 @@ static const struct ieee80211_ops rt2800soc_mac80211_ops = {
.sw_scan_start = rt2x00mac_sw_scan_start,
.sw_scan_complete = rt2x00mac_sw_scan_complete,
.get_stats = rt2x00mac_get_stats,
- .get_tkip_seq = rt2800_get_tkip_seq,
+ .get_key_seq = rt2800_get_key_seq,
.set_rts_threshold = rt2800_set_rts_threshold,
.sta_add = rt2x00mac_sta_add,
.sta_remove = rt2x00mac_sta_remove,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 6ec2466b52b6..5932306084fd 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -835,7 +835,7 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
.sw_scan_start = rt2x00mac_sw_scan_start,
.sw_scan_complete = rt2x00mac_sw_scan_complete,
.get_stats = rt2x00mac_get_stats,
- .get_tkip_seq = rt2800_get_tkip_seq,
+ .get_key_seq = rt2800_get_key_seq,
.set_rts_threshold = rt2800_set_rts_threshold,
.sta_add = rt2x00mac_sta_add,
.sta_remove = rt2x00mac_sta_remove,
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 300876df056f..1b8a459a412b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -359,8 +359,7 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
FIF_PLCPFAIL |
FIF_CONTROL |
FIF_PSPOLL |
- FIF_OTHER_BSS |
- FIF_PROMISC_IN_BSS;
+ FIF_OTHER_BSS;
/*
* Apply some rules to the filters:
@@ -369,9 +368,6 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
* - Multicast filter seems to kill broadcast traffic so never use it.
*/
*total_flags |= FIF_ALLMULTI;
- if (*total_flags & FIF_OTHER_BSS ||
- *total_flags & FIF_PROMISC_IN_BSS)
- *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
/*
* If the device has a single filter for all control frames,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 819455009fe4..c0e730ea1b69 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -530,10 +530,8 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
!(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
- rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
- !(filter_flags & FIF_PROMISC_IN_BSS) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
@@ -2760,11 +2758,10 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all hw fields.
*/
- rt2x00dev->hw->flags =
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK;
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
+ ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index a5458cf01fb2..7081e13b4fd6 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -480,10 +480,8 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
!(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
- rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
- !(filter_flags & FIF_PROMISC_IN_BSS));
+ rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
- !(filter_flags & FIF_PROMISC_IN_BSS) &&
!rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
@@ -2107,16 +2105,15 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all hw fields.
*
- * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are
+ * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are
* capable of sending the buffered frames out after the DTIM
* transmission using rt2x00lib_beacondone. This will send out
* multicast and broadcast traffic immediately instead of buffering it
* infinitely and thus dropping it after some time.
*/
- rt2x00dev->hw->flags =
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK;
+ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM);
+ ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS);
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 706b844bce00..a43a16fde59d 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1802,8 +1802,9 @@ static int rtl8180_probe(struct pci_dev *pdev,
priv->band.n_bitrates = 4;
dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_RX_INCLUDES_FCS;
+ ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(dev, RX_INCLUDES_FCS);
+
dev->vif_data_size = sizeof(struct rtl8180_vif);
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
@@ -1868,9 +1869,9 @@ static int rtl8180_probe(struct pci_dev *pdev,
}
if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180)
- dev->flags |= IEEE80211_HW_SIGNAL_DBM;
+ ieee80211_hw_set(dev, SIGNAL_DBM);
else
- dev->flags |= IEEE80211_HW_SIGNAL_UNSPEC;
+ ieee80211_hw_set(dev, SIGNAL_UNSPEC);
rtl8180_eeprom_read(priv);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 629ad8cfa17b..b7f72f9c7988 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -1478,9 +1478,9 @@ static int rtl8187_probe(struct usb_interface *intf,
dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_RX_INCLUDES_FCS;
+ ieee80211_hw_set(dev, RX_INCLUDES_FCS);
+ ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(dev, SIGNAL_DBM);
/* Initialize rate-control variables */
dev->max_rates = 1;
dev->max_rate_tries = RETRY_COUNT;
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 5cf509d346e8..73067cac289c 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -100,7 +100,7 @@ config RTL8821AE
select RTLWIFI_PCI
select RTLBTCOEXIST
---help---
- This is the driver for Realtek RTL8i821AE/RTL8812AE 802.11av PCIe
+ This is the driver for Realtek RTL8821AE/RTL8812AE 802.11ac PCIe
wireless network adapters.
If you choose to build it as a module, it will be called rtl8821ae
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 01f56c7df8b5..0517a4f2d3f2 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -394,20 +394,18 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
}
}
/* <5> set hw caps */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_CONNECTION_MONITOR |
- /* IEEE80211_HW_SUPPORTS_CQM_RSSI | */
- IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0;
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
/* swlps or hwlps has been set in diff chip in init_sw_vars */
- if (rtlpriv->psc.swctrl_lps)
- hw->flags |= IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- /* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
- 0;
+ if (rtlpriv->psc.swctrl_lps) {
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ }
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
index cefe26991421..f2b9d11adc9e 100644
--- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -1286,8 +1286,11 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
0x12, 0xe1, 0x90);
break;
case 3:
- btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
- 0x3, 0xf1, 0x90);
+ /* This call breaks BT when wireless is active -
+ * comment it out for now until a better fix is found:
+ * btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+ * 0x3, 0xf1, 0x90);
+ */
break;
case 4:
btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 82733c6b8c46..782ac2fc4b28 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -27,8 +27,7 @@
#define __RTL_CORE_H__
#define RTL_SUPPORTED_FILTERS \
- (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | FIF_CONTROL | \
+ (FIF_ALLMULTI | FIF_CONTROL | \
FIF_OTHER_BSS | \
FIF_FCSFAIL | \
FIF_BCN_PRBRESP_PROMISC)
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index 1893d01b9e78..a62bf0a65c32 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -40,6 +40,7 @@ static struct country_code_to_enum_rd allCountries[] = {
{COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
{COUNTRY_CODE_WORLD_WIDE_13, "EC"},
{COUNTRY_CODE_TELEC_NETGEAR, "EC"},
+ {COUNTRY_CODE_WORLD_WIDE_13_5G_ALL, "US"},
};
/*
@@ -124,6 +125,17 @@ static const struct ieee80211_regdomain rtl_regdom_14_60_64 = {
}
};
+static const struct ieee80211_regdomain rtl_regdom_12_13_5g_all = {
+ .n_reg_rules = 4,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTL819x_2GHZ_CH01_11,
+ RTL819x_2GHZ_CH12_13,
+ RTL819x_5GHZ_5150_5350,
+ RTL819x_5GHZ_5470_5850,
+ }
+};
+
static const struct ieee80211_regdomain rtl_regdom_14 = {
.n_reg_rules = 3,
.alpha2 = "99",
@@ -348,6 +360,8 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
return &rtl_regdom_14_60_64;
case COUNTRY_CODE_GLOBAL_DOMAIN:
return &rtl_regdom_14;
+ case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
+ return &rtl_regdom_12_13_5g_all;
default:
return &rtl_regdom_no_midband;
}
@@ -384,6 +398,25 @@ static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
return NULL;
}
+static u8 channel_plan_to_country_code(u8 channelplan)
+{
+ switch (channelplan) {
+ case 0x20:
+ case 0x21:
+ return COUNTRY_CODE_WORLD_WIDE_13;
+ case 0x22:
+ return COUNTRY_CODE_IC;
+ case 0x32:
+ return COUNTRY_CODE_TELEC_NETGEAR;
+ case 0x41:
+ return COUNTRY_CODE_GLOBAL_DOMAIN;
+ case 0x7f:
+ return COUNTRY_CODE_WORLD_WIDE_13_5G_ALL;
+ default:
+ return COUNTRY_CODE_MAX; /*Error*/
+ }
+}
+
int rtl_regd_init(struct ieee80211_hw *hw,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request))
@@ -396,11 +429,12 @@ int rtl_regd_init(struct ieee80211_hw *hw,
return -EINVAL;
/* init country_code from efuse channel plan */
- rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan;
+ rtlpriv->regd.country_code =
+ channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
- "rtl: EEPROM regdomain: 0x%0x\n",
- rtlpriv->regd.country_code);
+ RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
+ "rtl: EEPROM regdomain: 0x%0x conuntry code: %d\n",
+ rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h
index 3bbbaaa68530..f7f15bce35dd 100644
--- a/drivers/net/wireless/rtlwifi/regd.h
+++ b/drivers/net/wireless/rtlwifi/regd.h
@@ -49,6 +49,7 @@ enum country_code_type_t {
COUNTRY_CODE_GLOBAL_DOMAIN = 10,
COUNTRY_CODE_WORLD_WIDE_13 = 11,
COUNTRY_CODE_TELEC_NETGEAR = 12,
+ COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13,
/*add new channel plan above this line */
COUNTRY_CODE_MAX
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
index 86ce5b1930e6..8ee83b093c0d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
@@ -1354,27 +1354,11 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci)
}
}
-static void rtl88ee_clear_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 tmp;
-
- tmp = rtl_read_dword(rtlpriv, REG_HISR);
- rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HISRE);
- rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HSISR);
- rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
void rtl88ee_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtl88ee_clear_interrupt(hw);/*clear it here first*/
rtl_write_dword(rtlpriv, REG_HIMR,
rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE,
@@ -1919,8 +1903,8 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
"dev_addr: %pM\n", rtlefuse->dev_addr);
/*channel plan */
rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
- /* set channel paln to world wide 13 */
- rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+ /* set channel plan from efuse */
+ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*tx power*/
_rtl88ee_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
index ef28c8ea1e84..02013df968a0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
@@ -23,7 +23,7 @@
*
*****************************************************************************/
-#include "pwrseqcmd.h"
+#include "../pwrseqcmd.h"
#include "pwrseq.h"
/* drivers should parse below arrays and do the corresponding actions */
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
index 79103347d967..f2d9c6116e5c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
@@ -26,7 +26,7 @@
#ifndef __RTL8723E_PWRSEQ_H__
#define __RTL8723E_PWRSEQ_H__
-#include "pwrseqcmd.h"
+#include "../pwrseqcmd.h"
/* Check document WM-20110607-Paul-RTL8188EE_Power_Architecture-R02.vsd
* There are 6 HW Power States:
* 0: POFF--Power Off
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index d310d55d800e..189859617db8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -889,7 +889,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version));
rtl92c_init_beacon_parameters(hw, rtlhal->version);
rtl92c_init_ampdu_aggregation(hw);
- rtl92c_init_beacon_max_error(hw, true);
+ rtl92c_init_beacon_max_error(hw);
return err;
}
@@ -1323,7 +1323,6 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
bt_msr &= 0xfc;
- rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
if (type == NL80211_IFTYPE_UNSPECIFIED || type ==
NL80211_IFTYPE_STATION) {
_rtl92cu_stop_tx_beacon(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index adb810794eef..f3db6bc8596a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -613,7 +613,7 @@ void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, 0x4CA, 0x0708);
}
-void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode)
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
index bf53652e4edd..58548e8f2c41 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -66,7 +66,7 @@ void rtl92c_init_edca_param(struct ieee80211_hw *hw,
void rtl92c_init_edca(struct ieee80211_hw *hw);
void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw);
-void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode);
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw);
void rtl92c_init_rdg_setting(struct ieee80211_hw *hw);
void rtl92c_init_retry_function(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
index c5d4b8013cde..232865cc3ffd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
@@ -875,7 +875,7 @@ static void _rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
break;
default:
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], Unkown packet!! CmdId(%#X)!\n", c2h_cmd_id);
+ "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id);
break;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
index da0a6125f314..5f14308e8eb3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
@@ -1584,28 +1584,11 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci)
}
}
-static void rtl92ee_clear_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 tmp;
-
- tmp = rtl_read_dword(rtlpriv, REG_HISR);
- rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HISRE);
- rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HSISR);
- rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
void rtl92ee_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtl92ee_clear_interrupt(hw);/*clear it here first*/
-
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
rtlpci->irq_enabled = true;
@@ -2194,8 +2177,8 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw)
"dev_addr: %pM\n", rtlefuse->dev_addr);
/*channel plan */
rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
- /* set channel paln to world wide 13 */
- rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+ /* set channel plan from efuse */
+ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*tx power*/
_rtl92ee_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
hwinfo);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
index 67bb47d77b68..a4b7eac6856f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -1258,18 +1258,6 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci)
}
}
-static void rtl8723e_clear_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 tmp;
-
- tmp = rtl_read_dword(rtlpriv, REG_HISR);
- rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HISRE);
- rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-}
-
void rtl8723e_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1284,7 +1272,6 @@ void rtl8723e_disable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtl8723e_clear_interrupt(hw);/*clear it here first*/
rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED);
rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED);
rtlpci->irq_enabled = false;
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
index 69d4f0fc1af1..d5da0f3c1217 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
@@ -613,7 +613,7 @@ static void _rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], Unkown packet!! CmdId(%#X)!\n", c2h_cmd_id);
+ "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id);
break;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
index b681af3c7a35..c983d2fe147f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
@@ -1634,28 +1634,11 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
}
}
-static void rtl8723be_clear_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 tmp;
-
- tmp = rtl_read_dword(rtlpriv, REG_HISR);
- rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HISRE);
- rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HSISR);
- rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
void rtl8723be_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtl8723be_clear_interrupt(hw);/*clear it here first*/
-
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
rtlpci->irq_enabled = true;
@@ -2139,8 +2122,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
- /* set channel plan to world wide 13 */
- rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+ /* set channel plan from efuse */
+ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
if (rtlhal->oem_id == RT_CID_DEFAULT) {
/* Does this one have a Toshiba SMID from group 1? */
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 8704eee9f3a4..3236d44b459d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -2253,31 +2253,11 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
}
}
-static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 tmp;
- tmp = rtl_read_dword(rtlpriv, REG_HISR);
- /*printk("clear interrupt first:\n");
- printk("0x%x = 0x%08x\n",REG_HISR, tmp);*/
- rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HISRE);
- /*printk("0x%x = 0x%08x\n",REG_HISRE, tmp);*/
- rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
- tmp = rtl_read_dword(rtlpriv, REG_HSISR);
- /*printk("0x%x = 0x%08x\n",REG_HSISR, tmp);*/
- rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- rtl8821ae_clear_interrupt(hw);/*clear it here first*/
-
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
rtlpci->irq_enabled = true;
@@ -3232,8 +3212,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
if (rtlefuse->eeprom_channelplan == 0xff)
rtlefuse->eeprom_channelplan = 0x7F;
- /* set channel paln to world wide 13 */
- /* rtlefuse->channel_plan = (u8)rtlefuse->eeprom_channelplan; */
+ /* set channel plan from efuse */
+ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*parse xtal*/
rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE];
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 5d54d16a59e7..cd4777954f87 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -763,8 +763,7 @@ static u64 wl1251_op_prepare_multicast(struct ieee80211_hw *hw,
return (u64)(unsigned long)fp;
}
-#define WL1251_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+#define WL1251_SUPPORTED_FILTERS (FIF_ALLMULTI | \
FIF_FCSFAIL | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_CONTROL | \
@@ -795,10 +794,6 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
- if (*total & FIF_PROMISC_IN_BSS) {
- wl->rx_config |= CFG_BSSID_FILTER_EN;
- wl->rx_config |= CFG_RX_ALL_GOOD;
- }
if (*total & FIF_ALLMULTI)
/*
* CFG_MC_FILTER_EN in rx_config needs to be 0 to receive
@@ -825,7 +820,7 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (*total & FIF_ALLMULTI || *total & FIF_PROMISC_IN_BSS)
+ if (*total & FIF_ALLMULTI)
ret = wl1251_acx_group_address_tbl(wl, false, NULL, 0);
else if (fp)
ret = wl1251_acx_group_address_tbl(wl, fp->enabled,
@@ -1481,7 +1476,8 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
/* unit us */
/* FIXME: find a proper value */
- wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS;
+ ieee80211_hw_set(wl->hw, SIGNAL_DBM);
+ ieee80211_hw_set(wl->hw, SUPPORTS_PS);
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 717c4f5a02c2..49aca2cf7605 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -24,6 +24,7 @@
#include <linux/ip.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
+#include <linux/irq.h>
#include "../wlcore/wlcore.h"
#include "../wlcore/debug.h"
@@ -578,7 +579,7 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
[PART_TOP_PRCM_ELP_SOC] = {
- .mem = { .start = 0x00A02000, .size = 0x00010000 },
+ .mem = { .start = 0x00A00000, .size = 0x00012000 },
.reg = { .start = 0x00807000, .size = 0x00005000 },
.mem2 = { .start = 0x00800000, .size = 0x0000B000 },
.mem3 = { .start = 0x00000000, .size = 0x00000000 },
@@ -862,6 +863,7 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
{
u32 tmp;
int ret;
+ u16 irq_invert;
BUILD_BUG_ON(sizeof(struct wl18xx_mac_and_phy_params) >
WL18XX_PHY_INIT_MEM_SIZE);
@@ -911,6 +913,28 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
/* re-enable FDSP clock */
ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
MEM_FDSP_CLK_120_ENABLE);
+ if (ret < 0)
+ goto out;
+
+ ret = irq_get_trigger_type(wl->irq);
+ if ((ret == IRQ_TYPE_LEVEL_LOW) || (ret == IRQ_TYPE_EDGE_FALLING)) {
+ wl1271_info("using inverted interrupt logic: %d", ret);
+ ret = wlcore_set_partition(wl,
+ &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_top_reg_read(wl, TOP_FN0_CCCR_REG_32, &irq_invert);
+ if (ret < 0)
+ goto out;
+
+ irq_invert |= BIT(1);
+ ret = wl18xx_top_reg_write(wl, TOP_FN0_CCCR_REG_32, irq_invert);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
+ }
out:
return ret;
@@ -1351,9 +1375,10 @@ out:
}
#define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
-static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
+
+static int wl18xx_load_conf_file(struct device *dev, struct wlcore_conf *conf,
+ struct wl18xx_priv_conf *priv_conf)
{
- struct wl18xx_priv *priv = wl->priv;
struct wlcore_conf_file *conf_file;
const struct firmware *fw;
int ret;
@@ -1362,14 +1387,14 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
if (ret < 0) {
wl1271_error("could not get configuration binary %s: %d",
WL18XX_CONF_FILE_NAME, ret);
- goto out_fallback;
+ return ret;
}
if (fw->size != WL18XX_CONF_SIZE) {
wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
WL18XX_CONF_SIZE, fw->size);
ret = -EINVAL;
- goto out;
+ goto out_release;
}
conf_file = (struct wlcore_conf_file *) fw->data;
@@ -1379,7 +1404,7 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
"expected 0x%0x got 0x%0x", WL18XX_CONF_MAGIC,
conf_file->header.magic);
ret = -EINVAL;
- goto out;
+ goto out_release;
}
if (conf_file->header.version != cpu_to_le32(WL18XX_CONF_VERSION)) {
@@ -1387,28 +1412,32 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
"expected 0x%08x got 0x%08x",
WL18XX_CONF_VERSION, conf_file->header.version);
ret = -EINVAL;
- goto out;
+ goto out_release;
}
- memcpy(&wl->conf, &conf_file->core, sizeof(wl18xx_conf));
- memcpy(&priv->conf, &conf_file->priv, sizeof(priv->conf));
+ memcpy(conf, &conf_file->core, sizeof(*conf));
+ memcpy(priv_conf, &conf_file->priv, sizeof(*priv_conf));
+
+out_release:
+ release_firmware(fw);
+ return ret;
+}
- goto out;
+static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
+{
+ struct wl18xx_priv *priv = wl->priv;
-out_fallback:
- wl1271_warning("falling back to default config");
+ if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf) < 0) {
+ wl1271_warning("falling back to default config");
- /* apply driver default configuration */
- memcpy(&wl->conf, &wl18xx_conf, sizeof(wl18xx_conf));
- /* apply default private configuration */
- memcpy(&priv->conf, &wl18xx_default_priv_conf, sizeof(priv->conf));
+ /* apply driver default configuration */
+ memcpy(&wl->conf, &wl18xx_conf, sizeof(wl->conf));
+ /* apply default private configuration */
+ memcpy(&priv->conf, &wl18xx_default_priv_conf,
+ sizeof(priv->conf));
+ }
- /* For now we just fallback */
return 0;
-
-out:
- release_firmware(fw);
- return ret;
}
static int wl18xx_plt_init(struct wl1271 *wl)
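The wl18xx_conf_init() rework above splits firmware parsing into wl18xx_load_conf_file(), which now calls release_firmware() on every exit path, leaving the caller to own the default-config fallback. A minimal sketch of that load-or-fallback shape, with hypothetical names (my_conf, load_conf_file() and default_conf are stand-ins, not wlcore symbols):

	/* Load-or-fallback sketch; the helper releases the firmware blob itself. */
	static int conf_init(struct device *dev, struct my_conf *conf)
	{
		if (load_conf_file(dev, conf) < 0) {
			dev_warn(dev, "falling back to default config\n");
			memcpy(conf, &default_conf, sizeof(*conf));
		}
		return 0;	/* fallback always succeeds, as in wl18xx_conf_init() */
	}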
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
index a433a75f3cd7..bac2364c8e72 100644
--- a/drivers/net/wireless/ti/wl18xx/reg.h
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -109,6 +109,7 @@
#define WL18XX_WELP_ARM_COMMAND (WL18XX_REGISTERS_BASE + 0x7100)
#define WL18XX_ENABLE (WL18XX_REGISTERS_BASE + 0x01543C)
+#define TOP_FN0_CCCR_REG_32 (WL18XX_TOP_OCP_BASE + 0x64)
/* PRCM registers */
#define PLATFORM_DETECTION 0xA0E3E0
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 0be807951afe..337223b9f6f8 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3175,8 +3175,7 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
return (u64)(unsigned long)fp;
}
-#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
- FIF_ALLMULTI | \
+#define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
FIF_FCSFAIL | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_CONTROL | \
@@ -5966,10 +5965,6 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
{
int ret;
- ret = wl12xx_set_power_on(wl);
- if (ret < 0)
- return ret;
-
ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
if (ret < 0)
goto out;
@@ -5985,7 +5980,6 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
ret = wl->ops->get_mac(wl);
out:
- wl1271_power_off(wl);
return ret;
}
@@ -6066,18 +6060,19 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
/* FIXME: find a proper value */
wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
- wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
- IEEE80211_HW_HAS_RATE_CONTROL |
- IEEE80211_HW_CONNECTION_MONITOR |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_AP_LINK_PS |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
- IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_CHANCTX_STA_CSA;
+ ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(wl->hw, AP_LINK_PS);
+ ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(wl->hw, SIGNAL_DBM);
+ ieee80211_hw_set(wl->hw, SUPPORTS_PS);
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
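This hunk, like the wl1251, zd1211rw and vt665x conversions elsewhere in this series, replaces an open-coded hw->flags bitmask assignment with mac80211's per-bit ieee80211_hw_set() accessor. For orientation, a sketch of the accessor pair as defined in include/net/mac80211.h at the time (quoted from memory, so treat the exact signature as an assumption):

	static inline void _ieee80211_hw_set(struct ieee80211_hw *hw,
					     enum ieee80211_hw_flags flg)
	{
		return __set_bit(flg, hw->flags);
	}
	#define ieee80211_hw_set(hw, flg)	_ieee80211_hw_set(hw, IEEE80211_HW_##flg)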
@@ -6432,10 +6427,22 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
else
wl->irq_flags |= IRQF_ONESHOT;
+ ret = wl12xx_set_power_on(wl);
+ if (ret < 0)
+ goto out_free_nvs;
+
+ ret = wl12xx_get_hw_info(wl);
+ if (ret < 0) {
+ wl1271_error("couldn't get hw info");
+ wl1271_power_off(wl);
+ goto out_free_nvs;
+ }
+
ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
wl->irq_flags, pdev->name, wl);
if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
+ wl1271_error("interrupt configuration failed");
+ wl1271_power_off(wl);
goto out_free_nvs;
}
@@ -6449,12 +6456,7 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
}
#endif
disable_irq(wl->irq);
-
- ret = wl12xx_get_hw_info(wl);
- if (ret < 0) {
- wl1271_error("couldn't get hw info");
- goto out_irq;
- }
+ wl1271_power_off(wl);
ret = wl->ops->identify_chip(wl);
if (ret < 0)
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index e7af261e9198..e539d9b1b562 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -1230,7 +1230,7 @@ static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
}
#define SUPPORTED_FIF_FLAGS \
- (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
+ (FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)
static void zd_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
@@ -1256,7 +1256,7 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
* we will have some issue with IPv6 which uses multicast for link
* layer address resolution.
*/
- if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
+ if (*new_flags & FIF_ALLMULTI)
zd_mc_add_all(&hash);
spin_lock_irqsave(&mac->lock, flags);
@@ -1397,10 +1397,10 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_UNSPEC |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_MFP_CAPABLE;
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, SIGNAL_UNSPEC);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_MESH_POINT) |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0d2594395ffb..f1b2c1721917 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -52,7 +52,7 @@
* event channels are limited resource. Split event channels are
* enabled by default.
*/
-bool separate_tx_rx_irq = 1;
+bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);
/* The time that packets can stay on the guest Rx internal queue
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e031c943286e..c89ca26e254d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1560,9 +1560,8 @@ static int xennet_init_queue(struct netfront_queue *queue)
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
- init_timer(&queue->rx_refill_timer);
- queue->rx_refill_timer.data = (unsigned long)queue;
- queue->rx_refill_timer.function = rx_refill_timeout;
+ setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
+ (unsigned long)queue);
snprintf(queue->name, sizeof(queue->name), "%s-q%u",
queue->info->netdev->name, queue->id);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 0c064485d1c2..fdc60db60829 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -68,6 +68,9 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
phy->irq = mdio->irq[addr];
}
+ if (of_property_read_bool(child, "broken-turn-around"))
+ mdio->phy_ignore_ta_mask |= 1 << addr;
+
/* Associate the OF node with the device structure so it
* can be looked up later */
of_node_get(child);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 00b7d9c9fe48..2f5b518b0e78 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2150,7 +2150,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
rc = lcs_detect(card);
if (rc) {
LCS_DBF_TEXT(2, setup, "dtctfail");
- dev_err(&card->dev->dev,
+ dev_err(&ccwgdev->dev,
"Detecting a network adapter for LCS devices"
" failed with rc=%d (0x%x)\n", rc, rc);
lcs_stopcard(card);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 3abac028899f..ba974a2e409f 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -175,6 +175,8 @@ struct qeth_sbp_info {
__u32 supported_funcs;
enum qeth_sbp_roles role;
__u32 hostnotification:1;
+ __u32 reflect_promisc:1;
+ __u32 reflect_promisc_primary:1;
};
static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3466d3cb7647..5e20fba37bff 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -645,7 +645,8 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
card->info.hwtrap = 2;
qeth_schedule_recovery(card);
return NULL;
- case IPA_CMD_SETBRIDGEPORT:
+ case IPA_CMD_SETBRIDGEPORT_IQD:
+ case IPA_CMD_SETBRIDGEPORT_OSA:
case IPA_CMD_ADDRESS_CHANGE_NOTIF:
if (card->discipline->control_event_handler
(card, cmd))
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 7b55768a9592..beb4bdc26de5 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -237,6 +237,7 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_DELGMAC, "delgmac"},
{IPA_CMD_SETVLAN, "setvlan"},
{IPA_CMD_DELVLAN, "delvlan"},
+ {IPA_CMD_SETBRIDGEPORT_OSA, "set_bridge_port(osa)"},
{IPA_CMD_SETCCID, "setccid"},
{IPA_CMD_DELCCID, "delccid"},
{IPA_CMD_MODCCID, "modccid"},
@@ -249,7 +250,7 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_DELIP, "delip"},
{IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
{IPA_CMD_SET_DIAG_ASS, "set_diag_ass"},
- {IPA_CMD_SETBRIDGEPORT, "set_bridge_port"},
+ {IPA_CMD_SETBRIDGEPORT_IQD, "set_bridge_port(hs)"},
{IPA_CMD_CREATE_ADDR, "create_addr"},
{IPA_CMD_DESTROY_ADDR, "destroy_addr"},
{IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"},
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 1558be1af72d..6cccc9a49ede 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -92,6 +92,7 @@ enum qeth_ipa_cmds {
IPA_CMD_DELGMAC = 0x24,
IPA_CMD_SETVLAN = 0x25,
IPA_CMD_DELVLAN = 0x26,
+ IPA_CMD_SETBRIDGEPORT_OSA = 0x2b,
IPA_CMD_SETCCID = 0x41,
IPA_CMD_DELCCID = 0x42,
IPA_CMD_MODCCID = 0x43,
@@ -104,7 +105,7 @@ enum qeth_ipa_cmds {
IPA_CMD_DELIP = 0xb7,
IPA_CMD_SETADAPTERPARMS = 0xb8,
IPA_CMD_SET_DIAG_ASS = 0xb9,
- IPA_CMD_SETBRIDGEPORT = 0xbe,
+ IPA_CMD_SETBRIDGEPORT_IQD = 0xbe,
IPA_CMD_CREATE_ADDR = 0xc3,
IPA_CMD_DESTROY_ADDR = 0xc4,
IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0ea0869120cf..2e65b989a9ea 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -137,7 +137,7 @@ static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
rc = 0;
break;
case IPA_RC_L2_UNSUPPORTED_CMD:
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
break;
case IPA_RC_L2_ADDR_TABLE_FULL:
rc = -ENOSPC;
@@ -683,6 +683,39 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return rc ? -EINVAL : 0;
}
+static void qeth_promisc_to_bridge(struct qeth_card *card)
+{
+ struct net_device *dev = card->dev;
+ enum qeth_ipa_promisc_modes promisc_mode;
+ int role;
+ int rc;
+
+ QETH_CARD_TEXT(card, 3, "pmisc2br");
+
+ if (!card->options.sbp.reflect_promisc)
+ return;
+ promisc_mode = (dev->flags & IFF_PROMISC) ? SET_PROMISC_MODE_ON
+ : SET_PROMISC_MODE_OFF;
+ if (promisc_mode == card->info.promisc_mode)
+ return;
+
+ if (promisc_mode == SET_PROMISC_MODE_ON) {
+ if (card->options.sbp.reflect_promisc_primary)
+ role = QETH_SBP_ROLE_PRIMARY;
+ else
+ role = QETH_SBP_ROLE_SECONDARY;
+ } else
+ role = QETH_SBP_ROLE_NONE;
+
+ rc = qeth_bridgeport_setrole(card, role);
+ QETH_DBF_TEXT_(SETUP, 2, "bpm%c%04x",
+ (promisc_mode == SET_PROMISC_MODE_ON) ? '+' : '-', rc);
+ if (!rc) {
+ card->options.sbp.role = role;
+ card->info.promisc_mode = promisc_mode;
+ }
+}
+
static void qeth_l2_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -704,9 +737,10 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
qeth_l2_add_mc(card, ha->addr, 1);
spin_unlock_bh(&card->mclock);
- if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
- return;
- qeth_setadp_promisc_mode(card);
+ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+ qeth_setadp_promisc_mode(card);
+ else
+ qeth_promisc_to_bridge(card);
}
static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -994,7 +1028,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_bridgeport_query_support(card);
if (card->options.sbp.supported_funcs)
dev_info(&card->gdev->dev,
- "The device represents a HiperSockets Bridge Capable Port\n");
+ "The device represents a Bridge Capable Port\n");
qeth_trace_features(card);
if (!card->dev && qeth_l2_setup_netdev(card)) {
@@ -1247,7 +1281,8 @@ static int qeth_l2_control_event(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
switch (cmd->hdr.command) {
- case IPA_CMD_SETBRIDGEPORT:
+ case IPA_CMD_SETBRIDGEPORT_OSA:
+ case IPA_CMD_SETBRIDGEPORT_IQD:
if (cmd->data.sbp.hdr.command_code ==
IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
qeth_bridge_state_change(card, cmd);
@@ -1533,7 +1568,7 @@ static void qeth_bridge_host_event_worker(struct work_struct *work)
if (data->hostevs.lost_event_mask) {
dev_info(&data->card->gdev->dev,
-"Address notification from the HiperSockets Bridge Port stopped %s (%s)\n",
+"Address notification from the Bridge Port stopped %s (%s)\n",
data->card->dev->name,
(data->hostevs.lost_event_mask == 0x01)
? "Overflow"
@@ -1617,70 +1652,80 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd)
{
int rc;
+ int is_iqd = (card->info.type == QETH_CARD_TYPE_IQD);
- switch (cbctl->ipa_rc) {
- case IPA_RC_SUCCESS:
+ if ((is_iqd && (cbctl->ipa_rc == IPA_RC_SUCCESS)) ||
+ (!is_iqd && (cbctl->ipa_rc == cbctl->cmd_rc)))
switch (cbctl->cmd_rc) {
case 0x0000:
rc = 0;
break;
+ case 0x2B04:
case 0x0004:
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
break;
+ case 0x2B0C:
case 0x000C: /* Not configured as bridge Port */
rc = -ENODEV; /* maybe not the best code here? */
dev_err(&card->gdev->dev,
- "The HiperSockets device is not configured as a Bridge Port\n");
+ "The device is not configured as a Bridge Port\n");
break;
+ case 0x2B14:
case 0x0014: /* Another device is Primary */
switch (setcmd) {
case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
rc = -EEXIST;
dev_err(&card->gdev->dev,
- "The HiperSockets LAN already has a primary Bridge Port\n");
+ "The LAN already has a primary Bridge Port\n");
break;
case IPA_SBP_SET_SECONDARY_BRIDGE_PORT:
rc = -EBUSY;
dev_err(&card->gdev->dev,
- "The HiperSockets device is already a primary Bridge Port\n");
+ "The device is already a primary Bridge Port\n");
break;
default:
rc = -EIO;
}
break;
+ case 0x2B18:
case 0x0018: /* This device is currently Secondary */
rc = -EBUSY;
dev_err(&card->gdev->dev,
- "The HiperSockets device is already a secondary Bridge Port\n");
+ "The device is already a secondary Bridge Port\n");
break;
+ case 0x2B1C:
case 0x001C: /* Limit for Secondary devices reached */
rc = -EEXIST;
dev_err(&card->gdev->dev,
- "The HiperSockets LAN cannot have more secondary Bridge Ports\n");
+ "The LAN cannot have more secondary Bridge Ports\n");
break;
+ case 0x2B24:
case 0x0024: /* This device is currently Primary */
rc = -EBUSY;
dev_err(&card->gdev->dev,
- "The HiperSockets device is already a primary Bridge Port\n");
+ "The device is already a primary Bridge Port\n");
break;
+ case 0x2B20:
case 0x0020: /* Not authorized by zManager */
rc = -EACCES;
dev_err(&card->gdev->dev,
- "The HiperSockets device is not authorized to be a Bridge Port\n");
+ "The device is not authorized to be a Bridge Port\n");
break;
default:
rc = -EIO;
}
- break;
- case IPA_RC_NOTSUPP:
- rc = -ENOSYS;
- break;
- case IPA_RC_UNSUPPORTED_COMMAND:
- rc = -ENOSYS;
- break;
- default:
- rc = -EIO;
- }
+ else
+ switch (cbctl->ipa_rc) {
+ case IPA_RC_NOTSUPP:
+ rc = -EOPNOTSUPP;
+ break;
+ case IPA_RC_UNSUPPORTED_COMMAND:
+ rc = -EOPNOTSUPP;
+ break;
+ default:
+ rc = -EIO;
+ }
+
if (rc) {
QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc);
QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc);
@@ -1688,6 +1733,13 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
return rc;
}
+static inline int ipa_cmd_sbp(struct qeth_card *card)
+{
+ return (card->info.type == QETH_CARD_TYPE_IQD) ?
+ IPA_CMD_SETBRIDGEPORT_IQD :
+ IPA_CMD_SETBRIDGEPORT_OSA;
+}
+
static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
@@ -1719,7 +1771,7 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
struct _qeth_sbp_cbctl cbctl;
QETH_CARD_TEXT(card, 2, "brqsuppo");
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
if (!iob)
return;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1796,7 +1848,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "brqports");
if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
return -EOPNOTSUPP;
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1808,10 +1860,9 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
cmd->data.sbp.hdr.seq_no = 1;
rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
(void *)&cbctl);
- if (rc)
+ if (rc < 0)
return rc;
- rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
- return rc;
+ return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
}
EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
@@ -1864,7 +1915,7 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
}
if (!(card->options.sbp.supported_funcs & setcmd))
return -EOPNOTSUPP;
- iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
if (!iob)
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1874,10 +1925,9 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
cmd->data.sbp.hdr.seq_no = 1;
rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb,
(void *)&cbctl);
- if (rc)
+ if (rc < 0)
return rc;
- rc = qeth_bridgeport_makerc(card, &cbctl, setcmd);
- return rc;
+ return qeth_bridgeport_makerc(card, &cbctl, setcmd);
}
/**
@@ -1898,7 +1948,7 @@ static int qeth_anset_makerc(struct qeth_card *card, int pnso_rc, u16 response)
case 0x0004:
case 0x0100:
case 0x0106:
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
dev_err(&card->gdev->dev,
"Setting address notification failed\n");
break;
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 59e3aa538b4d..52673cd1db99 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -23,8 +23,6 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
if (!card)
return -EINVAL;
- mutex_lock(&card->conf_mutex);
-
if (qeth_card_hw_is_reachable(card) &&
card->options.sbp.supported_funcs)
rc = qeth_bridgeport_query_ports(card,
@@ -59,8 +57,6 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
rc = sprintf(buf, "%s\n", word);
}
- mutex_unlock(&card->conf_mutex);
-
return rc;
}
@@ -90,7 +86,9 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
mutex_lock(&card->conf_mutex);
- if (qeth_card_hw_is_reachable(card)) {
+ if (card->options.sbp.reflect_promisc) /* Forbid direct manipulation */
+ rc = -EPERM;
+ else if (qeth_card_hw_is_reachable(card)) {
rc = qeth_bridgeport_setrole(card, role);
if (!rc)
card->options.sbp.role = role;
@@ -123,12 +121,8 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
if (!card)
return -EINVAL;
- mutex_lock(&card->conf_mutex);
-
enabled = card->options.sbp.hostnotification;
- mutex_unlock(&card->conf_mutex);
-
return sprintf(buf, "%d\n", enabled);
}
@@ -167,10 +161,72 @@ static DEVICE_ATTR(bridge_hostnotify, 0644,
qeth_bridgeport_hostnotification_show,
qeth_bridgeport_hostnotification_store);
+static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ char *state;
+
+ if (!card)
+ return -EINVAL;
+
+ if (card->options.sbp.reflect_promisc) {
+ if (card->options.sbp.reflect_promisc_primary)
+ state = "primary";
+ else
+ state = "secondary";
+ } else
+ state = "none";
+
+ return sprintf(buf, "%s\n", state);
+}
+
+static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ int enable, primary;
+ int rc = 0;
+
+ if (!card)
+ return -EINVAL;
+
+ if (sysfs_streq(buf, "none")) {
+ enable = 0;
+ primary = 0;
+ } else if (sysfs_streq(buf, "primary")) {
+ enable = 1;
+ primary = 1;
+ } else if (sysfs_streq(buf, "secondary")) {
+ enable = 1;
+ primary = 0;
+ } else
+ return -EINVAL;
+
+ mutex_lock(&card->conf_mutex);
+
+ if (card->options.sbp.role != QETH_SBP_ROLE_NONE)
+ rc = -EPERM;
+ else {
+ card->options.sbp.reflect_promisc = enable;
+ card->options.sbp.reflect_promisc_primary = primary;
+ rc = 0;
+ }
+
+ mutex_unlock(&card->conf_mutex);
+
+ return rc ? rc : count;
+}
+
+static DEVICE_ATTR(bridge_reflect_promisc, 0644,
+ qeth_bridgeport_reflect_show,
+ qeth_bridgeport_reflect_store);
+
static struct attribute *qeth_l2_bridgeport_attrs[] = {
&dev_attr_bridge_role.attr,
&dev_attr_bridge_state.attr,
&dev_attr_bridge_hostnotify.attr,
+ &dev_attr_bridge_reflect_promisc.attr,
NULL,
};
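The new bridge_reflect_promisc attribute takes "none", "primary" or "secondary", and its store handler returns -EPERM while a bridge-port role is assigned directly. A hypothetical user-space toggle; the ccwgroup bus ID in the path below is an example only:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	static int set_reflect_promisc(const char *mode)	/* "none"/"primary"/"secondary" */
	{
		int fd = open("/sys/bus/ccwgroup/devices/0.0.f500/bridge_reflect_promisc",
			      O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, mode, strlen(mode));
		close(fd);
		return n < 0 ? -1 : 0;
	}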
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 04e42c649134..70eb2f61bb92 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3198,8 +3198,7 @@ static int qeth_l3_set_features(struct net_device *dev,
netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
- u32 changed = dev->features ^ features;
- int err;
+ netdev_features_t changed = dev->features ^ features;
if (!(changed & NETIF_F_RXCSUM))
return 0;
@@ -3208,11 +3207,7 @@ static int qeth_l3_set_features(struct net_device *dev,
card->state == CARD_STATE_RECOVER)
return 0;
- err = qeth_l3_set_rx_csum(card, features & NETIF_F_RXCSUM);
- if (err)
- dev->features = features ^ NETIF_F_RXCSUM;
-
- return err;
+ return qeth_l3_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
}
static const struct ethtool_ops qeth_l3_ethtool_ops = {
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index eb58afcfb73b..45d30398d7c3 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -728,7 +728,7 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
}
ndev = n->dev;
- if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
+ if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
pr_info("multi-cast route %pI6 port %u, dev %s.\n",
daddr6->sin6_addr.s6_addr,
ntohs(daddr6->sin6_port), ndev->name);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 5c9e680aa375..e32d24ec7a11 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -27,6 +27,7 @@
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
+#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 8f6d0fb2cd80..a7cfc270bd08 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/aer.h>
#include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index bc95ce89af06..5ab2f6978209 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -379,7 +379,7 @@ void rtw_cfg80211_indicate_disconnect(struct rtw_adapter *padapter)
GFP_ATOMIC);
} else {
cfg80211_disconnected(padapter->pnetdev, 0, NULL,
- 0, GFP_ATOMIC);
+ 0, false, GFP_ATOMIC);
}
}
}
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 0343ae386f03..458bc340aece 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1524,21 +1524,12 @@ static void vnt_configure(struct ieee80211_hw *hw,
struct vnt_private *priv = hw->priv;
u8 rx_mode = 0;
- *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC;
+ *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
VNSvInPortB(priv->PortOffset + MAC_REG_RCR, &rx_mode);
dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
- if (changed_flags & FIF_PROMISC_IN_BSS) {
- /* unconditionally log net taps */
- if (*total_flags & FIF_PROMISC_IN_BSS)
- rx_mode |= RCR_UNICAST;
- else
- rx_mode &= ~RCR_UNICAST;
- }
-
if (changed_flags & FIF_ALLMULTI) {
if (*total_flags & FIF_ALLMULTI) {
unsigned long flags;
@@ -1802,10 +1793,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
- priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_TIMING_BEACON_ONLY;
+ ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
+ ieee80211_hw_set(priv->hw, SIGNAL_DBM);
+ ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
priv->hw->max_signal = 100;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index ab3ab84cb0a7..2ccfbff37c42 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -785,8 +785,7 @@ static void vnt_configure(struct ieee80211_hw *hw,
u8 rx_mode = 0;
int rc;
- *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC;
+ *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
@@ -796,14 +795,6 @@ static void vnt_configure(struct ieee80211_hw *hw,
dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode);
- if (changed_flags & FIF_PROMISC_IN_BSS) {
- /* unconditionally log net taps */
- if (*total_flags & FIF_PROMISC_IN_BSS)
- rx_mode |= RCR_UNICAST;
- else
- rx_mode &= ~RCR_UNICAST;
- }
-
if (changed_flags & FIF_ALLMULTI) {
if (*total_flags & FIF_ALLMULTI) {
if (priv->mc_list_count > 2)
@@ -987,10 +978,10 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
- priv->hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_TIMING_BEACON_ONLY;
+ ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
+ ieee80211_hw_set(priv->hw, SIGNAL_DBM);
+ ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
priv->hw->max_signal = 100;
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 7c87aecf4744..342e2b30c48f 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -722,7 +722,7 @@ void prism2_connect_result(wlandevice_t *wlandev, u8 failed)
void prism2_disconnected(wlandevice_t *wlandev)
{
cfg80211_disconnected(wlandev->netdev, 0, NULL,
- 0, GFP_KERNEL);
+ 0, false, GFP_KERNEL);
}
void prism2_roamed(wlandevice_t *wlandev)
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 74e6114ff18f..eb66d36db5f7 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -21,6 +21,7 @@
#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include <linux/idr.h>
#include <asm/unaligned.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 3f27bfd816d8..a3a3d85142e5 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -30,6 +30,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include <linux/falloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index a15411c79ae9..61dac494423e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/vmalloc.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 675f2d9d1f14..2b17bddeff0f 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -34,6 +34,7 @@
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 07d2996d8c1f..edc955558250 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -21,6 +21,7 @@
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/parser.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <linux/uio_driver.h>
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index ea32b386797f..83bbb26f3183 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -35,6 +35,7 @@
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
+#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 3a57a1b0fb51..b50642870a43 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -85,7 +85,7 @@ int afs_open_socket(void)
return -ENOMEM;
}
- ret = sock_create_kern(AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
+ ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
if (ret < 0) {
destroy_workqueue(afs_async_calls);
_leave(" = %d [socket]", ret);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index d08e079ea5d3..754fd6c0b747 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -921,8 +921,8 @@ static int tcp_accept_from_sock(struct connection *con)
mutex_unlock(&connections_lock);
memset(&peeraddr, 0, sizeof(peeraddr));
- result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
- IPPROTO_TCP, &newsock);
+ result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+ SOCK_STREAM, IPPROTO_TCP, &newsock);
if (result < 0)
return -ENOMEM;
@@ -1173,8 +1173,8 @@ static void tcp_connect_to_sock(struct connection *con)
goto out;
/* Create a socket to communicate with */
- result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
- IPPROTO_TCP, &sock);
+ result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
if (result < 0)
goto out_err;
@@ -1258,8 +1258,8 @@ static struct socket *tcp_create_listen_sock(struct connection *con,
addr_len = sizeof(struct sockaddr_in6);
/* Create a socket to communicate with */
- result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
- IPPROTO_TCP, &sock);
+ result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
if (result < 0) {
log_print("Can't create listening comms socket");
goto create_out;
@@ -1365,8 +1365,8 @@ static int sctp_listen_for_all(void)
log_print("Using SCTP for communications");
- result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_SEQPACKET,
- IPPROTO_SCTP, &sock);
+ result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+ SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
if (result < 0) {
log_print("Can't create comms socket, check SCTP is loaded");
goto out;
diff --git a/fs/splice.c b/fs/splice.c
index bfe62ae40f40..4f355a1c1a9e 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -261,6 +261,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
return ret;
}
+EXPORT_SYMBOL_GPL(splice_to_pipe);
void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
{
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
index ebd63fd05649..dc4254b8cbbc 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -289,5 +289,7 @@
#define UBI32_CORE1_CLK 279
#define UBI32_CORE2_CLK 280
#define EBI2_AON_CLK 281
+#define NSSTCM_CLK_SRC 282
+#define NSSTCM_CLK 283
#endif
diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h
new file mode 100644
index 000000000000..172744a72eb7
--- /dev/null
+++ b/include/dt-bindings/net/ti-dp83867.h
@@ -0,0 +1,45 @@
+/*
+ * Device Tree constants for the Texas Instruments DP83867 PHY
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * Copyright: (C) 2015 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_TI_DP83867_H
+#define _DT_BINDINGS_TI_DP83867_H
+
+/* PHY CTRL bits */
+#define DP83867_PHYCR_FIFO_DEPTH_3_B_NIB 0x00
+#define DP83867_PHYCR_FIFO_DEPTH_4_B_NIB 0x01
+#define DP83867_PHYCR_FIFO_DEPTH_6_B_NIB 0x02
+#define DP83867_PHYCR_FIFO_DEPTH_8_B_NIB 0x03
+
+/* RGMIIDCTL internal delay for rx and tx */
+#define DP83867_RGMIIDCTL_250_PS 0x0
+#define DP83867_RGMIIDCTL_500_PS 0x1
+#define DP83867_RGMIIDCTL_750_PS 0x2
+#define DP83867_RGMIIDCTL_1_NS 0x3
+#define DP83867_RGMIIDCTL_1_25_NS 0x4
+#define DP83867_RGMIIDCTL_1_50_NS 0x5
+#define DP83867_RGMIIDCTL_1_75_NS 0x6
+#define DP83867_RGMIIDCTL_2_00_NS 0x7
+#define DP83867_RGMIIDCTL_2_25_NS 0x8
+#define DP83867_RGMIIDCTL_2_50_NS 0x9
+#define DP83867_RGMIIDCTL_2_75_NS 0xa
+#define DP83867_RGMIIDCTL_3_00_NS 0xb
+#define DP83867_RGMIIDCTL_3_25_NS 0xc
+#define DP83867_RGMIIDCTL_3_50_NS 0xd
+#define DP83867_RGMIIDCTL_3_75_NS 0xe
+#define DP83867_RGMIIDCTL_4_00_NS 0xf
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
index 0ad5ef930b5d..de9c8140931a 100644
--- a/include/dt-bindings/reset/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
@@ -129,4 +129,47 @@
#define USB30_1_PHY_RESET 112
#define NSSFB0_RESET 113
#define NSSFB1_RESET 114
+#define UBI32_CORE1_CLKRST_CLAMP_RESET 115
+#define UBI32_CORE1_CLAMP_RESET 116
+#define UBI32_CORE1_AHB_RESET 117
+#define UBI32_CORE1_AXI_RESET 118
+#define UBI32_CORE2_CLKRST_CLAMP_RESET 119
+#define UBI32_CORE2_CLAMP_RESET 120
+#define UBI32_CORE2_AHB_RESET 121
+#define UBI32_CORE2_AXI_RESET 122
+#define GMAC_CORE1_RESET 123
+#define GMAC_CORE2_RESET 124
+#define GMAC_CORE3_RESET 125
+#define GMAC_CORE4_RESET 126
+#define GMAC_AHB_RESET 127
+#define NSS_CH0_RST_RX_CLK_N_RESET 128
+#define NSS_CH0_RST_TX_CLK_N_RESET 129
+#define NSS_CH0_RST_RX_125M_N_RESET 130
+#define NSS_CH0_HW_RST_RX_125M_N_RESET 131
+#define NSS_CH0_RST_TX_125M_N_RESET 132
+#define NSS_CH1_RST_RX_CLK_N_RESET 133
+#define NSS_CH1_RST_TX_CLK_N_RESET 134
+#define NSS_CH1_RST_RX_125M_N_RESET 135
+#define NSS_CH1_HW_RST_RX_125M_N_RESET 136
+#define NSS_CH1_RST_TX_125M_N_RESET 137
+#define NSS_CH2_RST_RX_CLK_N_RESET 138
+#define NSS_CH2_RST_TX_CLK_N_RESET 139
+#define NSS_CH2_RST_RX_125M_N_RESET 140
+#define NSS_CH2_HW_RST_RX_125M_N_RESET 141
+#define NSS_CH2_RST_TX_125M_N_RESET 142
+#define NSS_CH3_RST_RX_CLK_N_RESET 143
+#define NSS_CH3_RST_TX_CLK_N_RESET 144
+#define NSS_CH3_RST_RX_125M_N_RESET 145
+#define NSS_CH3_HW_RST_RX_125M_N_RESET 146
+#define NSS_CH3_RST_TX_125M_N_RESET 147
+#define NSS_RST_RX_250M_125M_N_RESET 148
+#define NSS_RST_TX_250M_125M_N_RESET 149
+#define NSS_QSGMII_TXPI_RST_N_RESET 150
+#define NSS_QSGMII_CDR_RST_N_RESET 151
+#define NSS_SGMII2_CDR_RST_N_RESET 152
+#define NSS_SGMII3_CDR_RST_N_RESET 153
+#define NSS_CAL_PRBS_RST_N_RESET 154
+#define NSS_LCKDT_RST_N_RESET 155
+#define NSS_SRDS_N_RESET 156
+
#endif
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index e34f906647d3..2ff4a9961e1d 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -305,6 +305,15 @@ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner);
extern void bcma_driver_unregister(struct bcma_driver *drv);
+/* module_bcma_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_bcma_driver(__bcma_driver) \
+ module_driver(__bcma_driver, bcma_driver_register, \
+ bcma_driver_unregister)
+
/* Set a fallback SPROM.
* See kdoc at the function definition for complete documentation. */
extern int bcma_arch_register_fallback_sprom(
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d5cda067115a..2235aee8096a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -105,7 +105,8 @@ struct bpf_verifier_ops {
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
- u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off,
+ u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
+ int src_reg, int ctx_off,
struct bpf_insn *insn);
};
@@ -123,15 +124,40 @@ struct bpf_prog_aux {
const struct bpf_verifier_ops *ops;
struct bpf_map **used_maps;
struct bpf_prog *prog;
- struct work_struct work;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
+struct bpf_array {
+ struct bpf_map map;
+ u32 elem_size;
+ /* 'ownership' of prog_array is claimed by the first program that
+ * is going to use this map or by the first program whose FD is stored
+ * in the map to make sure that all callers and callees have the same
+ * prog_type and JITed flag
+ */
+ enum bpf_prog_type owner_prog_type;
+ bool owner_jited;
+ union {
+ char value[0] __aligned(8);
+ struct bpf_prog *prog[0] __aligned(8);
+ };
+};
+#define MAX_TAIL_CALL_CNT 32
+
+u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+void bpf_prog_array_map_clear(struct bpf_map *map);
+bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+
#ifdef CONFIG_BPF_SYSCALL
void bpf_register_prog_type(struct bpf_prog_type_list *tl);
void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
void bpf_prog_put(struct bpf_prog *prog);
+void bpf_prog_put_rcu(struct bpf_prog *prog);
struct bpf_map *bpf_map_get(struct fd f);
void bpf_map_put(struct bpf_map *map);
@@ -160,5 +186,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
+extern const struct bpf_func_proto bpf_tail_call_proto;
+extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
#endif /* _LINUX_BPF_H */
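The prog-array pieces above back the new bpf_tail_call() helper: a program jumps to another program stored in a prog-array map slot, with MAX_TAIL_CALL_CNT bounding the chain. Conceptually, at the call site inside a BPF program (jmp_table and index are illustrative):

	/*
	 *	bpf_tail_call(ctx, &jmp_table, index);
	 *	return 0;	// reached only on an empty slot, a bad index,
	 *			// or once MAX_TAIL_CALL_CNT is exceeded; on
	 *			// success the tail call never returns
	 */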
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 656da2a12ffe..697ca7795bd9 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,6 +1,13 @@
#ifndef _LINUX_BRCMPHY_H
#define _LINUX_BRCMPHY_H
+#include <linux/phy.h>
+
+/* All Broadcom Ethernet switches have a pseudo-PHY at address 30 which is used
+ * to configure the switch internal registers via MDIO accesses.
+ */
+#define BRCM_PSEUDO_PHY_ADDR 30
+
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
#define PHY_ID_BCM5241 0x0143bc30
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 606563ef8a72..9012f8775208 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -110,7 +110,29 @@ static inline bool is_zero_ether_addr(const u8 *addr)
*/
static inline bool is_multicast_ether_addr(const u8 *addr)
{
- return 0x01 & addr[0];
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ u32 a = *(const u32 *)addr;
+#else
+ u16 a = *(const u16 *)addr;
+#endif
+#ifdef __BIG_ENDIAN
+ return 0x01 & (a >> ((sizeof(a) * 8) - 8));
+#else
+ return 0x01 & a;
+#endif
+}
+
+static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+#ifdef __BIG_ENDIAN
+ return 0x01 & ((*(const u64 *)addr) >> 56);
+#else
+ return 0x01 & (*(const u64 *)addr);
+#endif
+#else
+ return is_multicast_ether_addr(addr);
+#endif
}
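Both helpers test the I/G bit, the least-significant bit of the first octet, just on a wider load; the _64bits variant additionally relies on two readable padding bytes past the address, as the [6+2] prototype advertises. A standalone illustration of the same test (plain C, outside the kernel):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool is_mcast(const uint8_t *addr)
	{
		return addr[0] & 0x01;	/* I/G bit of the first octet */
	}

	int main(void)
	{
		const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
		const uint8_t ucast[6] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };

		printf("%d %d\n", is_mcast(bcast), is_mcast(ucast));	/* prints: 1 0 */
		return 0;
	}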
/**
@@ -169,6 +191,24 @@ static inline bool is_valid_ether_addr(const u8 *addr)
}
/**
+ * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
+ * @proto: Ethertype/length value to be tested
+ *
+ * Check that the value from the Ethertype/length field is a valid Ethertype.
+ *
+ * Return true if the valid is an 802.3 supported Ethertype.
+ */
+static inline bool eth_proto_is_802_3(__be16 proto)
+{
+#ifndef __BIG_ENDIAN
+ /* if CPU is little endian mask off bits representing LSB */
+ proto &= htons(0xFF00);
+#endif
+ /* cast both to u16 and compare since LSB can be ignored */
+ return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
+}
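The helper encodes the classic 802.3 boundary: values below ETH_P_802_3_MIN (0x0600) in the Ethertype/length field are 802.3 length fields, values at or above are Ethertypes, and only the high-order byte of the big-endian field decides that comparison. A standalone check of the same boundary (the 0x0600 constant is assumed from if_ether.h):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	static int proto_is_802_3(uint16_t proto_be)
	{
		return ntohs(proto_be) >= 0x0600;	/* ETH_P_802_3_MIN */
	}

	int main(void)
	{
		printf("%d %d\n", proto_is_802_3(htons(0x0800)),	/* IPv4 Ethertype -> 1 */
		       proto_is_802_3(htons(0x05DC)));			/* 802.3 length -> 0 */
		return 0;
	}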
+
+/**
* eth_random_addr - Generate software assigned random Ethernet address
* @addr: Pointer to a six-byte array containing the Ethernet address
*
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fa11b3a367be..17724f6ea983 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -207,6 +207,16 @@ struct bpf_prog_aux;
.off = OFF, \
.imm = 0 })
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
@@ -267,6 +277,14 @@ struct bpf_prog_aux;
.off = 0, \
.imm = 0 })
+/* Internal classic blocks for direct assignment */
+
+#define __BPF_STMT(CODE, K) \
+ ((struct sock_filter) BPF_STMT(CODE, K))
+
+#define __BPF_JUMP(CODE, K, JT, JF) \
+ ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
+
#define bytes_to_bpf_size(bytes) \
({ \
int bpf_size = -EINVAL; \
@@ -360,12 +378,9 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
int sk_filter(struct sock *sk, struct sk_buff *skb);
-void bpf_prog_select_runtime(struct bpf_prog *fp);
+int bpf_prog_select_runtime(struct bpf_prog *fp);
void bpf_prog_free(struct bpf_prog *fp);
-int bpf_convert_filter(struct sock_filter *prog, int len,
- struct bpf_insn *new_prog, int *new_len);
-
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
@@ -377,14 +392,17 @@ static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
__bpf_prog_free(fp);
}
+typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
+ unsigned int flen);
+
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+ bpf_aux_classic_check_t trans);
void bpf_prog_destroy(struct bpf_prog *fp);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
-
-int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
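The BPF_STX_XADD() macro added above emits the eBPF atomic-add instruction. A short sequence in the style of the kernel's BPF test programs, illustrative rather than taken from this patch:

	/* Atomically add r1 into a stack slot, then return the result. */
	struct bpf_insn prog[] = {
		BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),		/* *(u32 *)(fp - 8) = 0 */
		BPF_MOV64_IMM(BPF_REG_1, 1),			/* r1 = 1 */
		BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_1, -8),	/* lock *(u32 *)(fp - 8) += r1 */
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),	/* r0 = *(u32 *)(fp - 8) */
		BPF_EXIT_INSN(),				/* return r0 (== 1) */
	};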
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 15928f0647e4..6ba7cf23748f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -368,6 +368,11 @@ extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);
+struct page_frag_cache;
+extern void *__alloc_page_frag(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask);
+extern void __free_page_frag(void *addr);
+
extern void __free_kmem_pages(struct page *page, unsigned int order);
extern void free_kmem_pages(unsigned long addr, unsigned int order);
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 66a7d7600f43..b49cf923becc 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -74,7 +74,7 @@ static inline struct sock *sk_pppox(struct pppox_sock *po)
struct module;
struct pppox_proto {
- int (*create)(struct net *net, struct socket *sock);
+ int (*create)(struct net *net, struct socket *sock, int kern);
int (*ioctl)(struct socket *sock, unsigned int cmd,
unsigned long arg);
struct module *owner;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 920e4457ce6e..67ce5bd3b56a 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -416,7 +416,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
/**
* __vlan_get_tag - get the VLAN ID that is part of the payload
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if the skb is not of VLAN type
*/
@@ -435,7 +435,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
/**
* __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if @skb->vlan_tci is not set correctly
*/
@@ -456,7 +456,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
*
* Returns error if the skb is not VLAN tagged
*/
@@ -539,7 +539,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
*/
proto = vhdr->h_vlan_encapsulated_proto;
- if (ntohs(proto) >= ETH_P_802_3_MIN) {
+ if (eth_proto_is_802_3(proto)) {
skb->protocol = proto;
return;
}
@@ -628,4 +628,24 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
return features;
}
+/**
+ * compare_vlan_header - Compare two vlan headers
+ * @h1: Pointer to vlan header
+ * @h2: Pointer to vlan header
+ *
+ * Compare two vlan headers; returns 0 if equal.
+ *
+ * Please note that alignment of h1 & h2 is only guaranteed to be 16 bits.
+ */
+static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
+ const struct vlan_hdr *h2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return *(u32 *)h1 ^ *(u32 *)h2;
+#else
+ return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
+ ((__force u32)h1->h_vlan_encapsulated_proto ^
+ (__force u32)h2->h_vlan_encapsulated_proto);
+#endif
+}
#endif /* !(_LINUX_IF_VLAN_H_) */
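compare_vlan_header() XORs the 16-bit TCI and encapsulated-protocol fields (or both at once as a u32 where unaligned loads are cheap), so a zero result means the headers match. A minimal caller sketch (the wrapper name is invented):

	static bool vlan_hdrs_equal(const struct vlan_hdr *h1,
				    const struct vlan_hdr *h2)
	{
		return compare_vlan_header(h1, h2) == 0;
	}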
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 2c677afeea47..193ad488d3e2 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -130,5 +130,6 @@ extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
+int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
#endif
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
index 66c30a763b10..11f00cdabe3d 100644
--- a/include/linux/mdio-gpio.h
+++ b/include/linux/mdio-gpio.h
@@ -23,7 +23,8 @@ struct mdio_gpio_platform_data {
bool mdio_active_low;
bool mdo_active_low;
- unsigned int phy_mask;
+ u32 phy_mask;
+ u32 phy_ignore_ta_mask;
int irqs[PHY_MAX_ADDR];
/* reset callback */
int (*reset)(struct mii_bus *bus);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 83e80ab94500..ad31e476873f 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -46,8 +46,9 @@
#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 64
-#define MSIX_LEGACY_SZ 4
#define MIN_MSIX_P_PORT 5
+#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
+ (dev_cap).num_ports * MIN_MSIX_P_PORT)
#define MLX4_MAX_100M_UNITS_VAL 255 /*
* work around: can't set values
@@ -528,7 +529,6 @@ struct mlx4_caps {
int num_eqs;
int reserved_eqs;
int num_comp_vectors;
- int comp_pool;
int num_mpts;
int max_fmr_maps;
int num_mtts;
@@ -1332,10 +1332,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
- int *vector);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
int mlx4_get_phys_port_id(struct mlx4_dev *dev);
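With comp_pool gone, completion vectors are now requested per port. A hedged sketch of the new assign/release pairing; treating *vector as in/out is an assumption from the prototype:

/* Sketch: bind an RX ring to a completion vector on 'port'. */
static int ring_bind_eq(struct mlx4_dev *dev, u8 port, int requested)
{
	int vec = requested;
	int err;

	if (!mlx4_is_eq_vector_valid(dev, port, vec))
		vec = 0;			/* let the core choose */

	err = mlx4_assign_eq(dev, port, &vec);
	if (err)
		return err;

	if (mlx4_is_eq_shared(dev, vec))
		pr_debug("ring shares EQ vector %d\n", vec);

	return vec;	/* pair with mlx4_release_eq() on teardown */
}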
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 2695ced222df..abc4767695e4 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -169,6 +169,9 @@ int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_query_cq_mbox_out *out);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_modify_cq_mbox_in *in, int in_sz);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq, u16 cq_period,
+ u16 cq_max_count);
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
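The new wrapper programs CQ interrupt moderation via MODIFY_CQ. A hedged usage sketch; the values are illustrative and the period units are device-defined:

/* Sketch: event after 64 CQEs or one 16-unit period, whichever first. */
static int set_rx_cq_moderation(struct mlx5_core_dev *mdev,
				struct mlx5_core_cq *mcq)
{
	return mlx5_core_modify_cq_moderation(mdev, mcq, 16, 64);
}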
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index abf65c790421..b943cd9e2097 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -35,6 +35,7 @@
#include <linux/types.h>
#include <rdma/ib_verbs.h>
+#include <linux/mlx5/mlx5_ifc.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
@@ -58,6 +59,8 @@
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
@@ -70,6 +73,14 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_SET_TO_ONES(typ, p, fld) do { \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
+ << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))
@@ -88,6 +99,12 @@ __mlx5_mask(typ, fld))
#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
+#define MLX5_GET64_PR(typ, p, fld) ({ \
+ u64 ___t = MLX5_GET64(typ, p, fld); \
+ pr_debug(#fld " = 0x%llx\n", ___t); \
+ ___t; \
+})
+
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -115,6 +132,10 @@ enum {
};
enum {
+ MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
+};
+
+enum {
MLX5_MIN_PKEY_TABLE_SIZE = 128,
MLX5_MAX_LOG_PKEY_TABLE = 5,
};
@@ -264,6 +285,7 @@ enum {
MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
MLX5_OPCODE_SEND = 0x0a,
MLX5_OPCODE_SEND_IMM = 0x0b,
+ MLX5_OPCODE_LSO = 0x0e,
MLX5_OPCODE_RDMA_READ = 0x10,
MLX5_OPCODE_ATOMIC_CS = 0x11,
MLX5_OPCODE_ATOMIC_FA = 0x12,
@@ -312,13 +334,6 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
-enum {
- HCA_CAP_OPMOD_GET_MAX = 0,
- HCA_CAP_OPMOD_GET_CUR = 1,
- HCA_CAP_OPMOD_GET_ODP_MAX = 4,
- HCA_CAP_OPMOD_GET_ODP_CUR = 5
-};
-
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -541,6 +556,10 @@ struct mlx5_cmd_prot_block {
u8 sig;
};
+enum {
+ MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
+};
+
struct mlx5_err_cqe {
u8 rsvd0[32];
__be32 srqn;
@@ -554,13 +573,22 @@ struct mlx5_err_cqe {
};
struct mlx5_cqe64 {
- u8 rsvd0[17];
+ u8 rsvd0[4];
+ u8 lro_tcppsh_abort_dupack;
+ u8 lro_min_ttl;
+ __be16 lro_tcp_win;
+ __be32 lro_ack_seq_num;
+ __be32 rss_hash_result;
+ u8 rss_hash_type;
u8 ml_path;
- u8 rsvd20[4];
+ u8 rsvd20[2];
+ __be16 check_sum;
__be16 slid;
__be32 flags_rqpn;
- u8 rsvd28[4];
- __be32 srqn;
+ u8 hds_ip_ext;
+ u8 l4_hdr_type_etc;
+ __be16 vlan_info;
+ __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
__be32 imm_inval_pkey;
u8 rsvd40[4];
__be32 byte_cnt;
@@ -571,6 +599,40 @@ struct mlx5_cqe64 {
u8 op_own;
};
+static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+}
+
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+}
+
+static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+{
+ return !!(cqe->l4_hdr_type_etc & 0x1);
+}
+
+enum {
+ CQE_L4_HDR_TYPE_NONE = 0x0,
+ CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
+ CQE_L4_HDR_TYPE_UDP = 0x2,
+ CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
+ CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
+};
+
+enum {
+ CQE_RSS_HTYPE_IP = 0x3 << 6,
+ CQE_RSS_HTYPE_L4 = 0x3 << 2,
+};
+
+enum {
+ CQE_L2_OK = 1 << 0,
+ CQE_L3_OK = 1 << 1,
+ CQE_L4_OK = 1 << 2,
+};
+
struct mlx5_sig_err_cqe {
u8 rsvd0[16];
__be32 expected_trans_sig;
@@ -996,4 +1058,135 @@ struct mlx5_destroy_psv_out {
u8 rsvd[8];
};
+#define MLX5_CMD_OP_MAX 0x920
+
+enum {
+ VPORT_STATE_DOWN = 0x0,
+ VPORT_STATE_UP = 0x1,
+};
+
+enum {
+ MLX5_L3_PROT_TYPE_IPV4 = 0,
+ MLX5_L3_PROT_TYPE_IPV6 = 1,
+};
+
+enum {
+ MLX5_L4_PROT_TYPE_TCP = 0,
+ MLX5_L4_PROT_TYPE_UDP = 1,
+};
+
+enum {
+ MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
+ MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
+ MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
+ MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
+};
+
+enum {
+ MLX5_MATCH_OUTER_HEADERS = 1 << 0,
+ MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
+ MLX5_MATCH_INNER_HEADERS = 1 << 2,
+};
+
+enum {
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
+ MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
+};
+
+enum {
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
+};
+
+/* MLX5 DEV CAPs */
+
+/* TODO: EAT.ME */
+enum mlx5_cap_mode {
+ HCA_CAP_OPMOD_GET_MAX = 0,
+ HCA_CAP_OPMOD_GET_CUR = 1,
+};
+
+enum mlx5_cap_type {
+ MLX5_CAP_GENERAL = 0,
+ MLX5_CAP_ETHERNET_OFFLOADS,
+ MLX5_CAP_ODP,
+ MLX5_CAP_ATOMIC,
+ MLX5_CAP_ROCE,
+ MLX5_CAP_IPOIB_OFFLOADS,
+ MLX5_CAP_EOIB_OFFLOADS,
+ MLX5_CAP_FLOW_TABLE,
+ /* NUM OF CAP Types */
+ MLX5_CAP_NUM
+};
+
+/* GET Dev Caps macros */
+#define MLX5_CAP_GEN(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_GEN_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ETH_MAX(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ROCE(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ROCE_MAX(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ATOMIC(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_FLOWTABLE(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ODP(mdev, cap)\
+ MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+
+enum {
+ MLX5_CMD_STAT_OK = 0x0,
+ MLX5_CMD_STAT_INT_ERR = 0x1,
+ MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
+ MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
+ MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
+ MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
+ MLX5_CMD_STAT_RES_BUSY = 0x6,
+ MLX5_CMD_STAT_LIM_ERR = 0x8,
+ MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
+ MLX5_CMD_STAT_IX_ERR = 0xa,
+ MLX5_CMD_STAT_NO_RES_ERR = 0xf,
+ MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
+ MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
+ MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
+ MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
+ MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
+};
+
+static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
+{
+ if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
+ return 0;
+ return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
+}
+
#endif /* MLX5_DEVICE_H */
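Tying the new cap macros to the reshaped CQE, a hedged receive-path sketch (handling elided; field semantics as labeled in the struct above):

/* Sketch: consult device caps, then classify one RX completion. */
static void rx_inspect_cqe(struct mlx5_core_dev *mdev, struct mlx5_cqe64 *cqe)
{
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads))
		return;

	if (cqe_has_vlan(cqe))
		pr_debug("vlan tci %u\n", be16_to_cpu(cqe->vlan_info));

	switch (get_cqe_l4_hdr_type(cqe)) {
	case CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA:
	case CQE_L4_HDR_TYPE_TCP_NO_ACK:
		/* TCP segment; LRO state sits in the lro_* fields */
		break;
	case CQE_L4_HDR_TYPE_UDP:
		break;
	default:
		break;
	}
}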
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9a90e7523dc2..c0930f8d7021 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -44,7 +44,6 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/mlx5_ifc.h>
enum {
MLX5_BOARD_ID_LEN = 64,
@@ -85,7 +84,7 @@ enum {
};
enum {
- MLX5_MAX_EQ_NAME = 32
+ MLX5_MAX_IRQ_NAME = 32
};
enum {
@@ -108,6 +107,7 @@ enum {
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
MLX5_REG_PELC = 0x500e,
+ MLX5_REG_PVLC = 0x500f,
MLX5_REG_PMLP = 0, /* TBD */
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -150,6 +150,11 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_CLIENT_REREG,
};
+enum mlx5_port_status {
+ MLX5_PORT_UP = 1 << 1,
+ MLX5_PORT_DOWN = 1 << 2,
+};
+
struct mlx5_uuar_info {
struct mlx5_uar *uars;
int num_uars;
@@ -269,56 +274,7 @@ struct mlx5_cmd {
struct mlx5_port_caps {
int gid_table_len;
int pkey_table_len;
-};
-
-struct mlx5_general_caps {
- u8 log_max_eq;
- u8 log_max_cq;
- u8 log_max_qp;
- u8 log_max_mkey;
- u8 log_max_pd;
- u8 log_max_srq;
- u8 log_max_strq;
- u8 log_max_mrw_sz;
- u8 log_max_bsf_list_size;
- u8 log_max_klm_list_size;
- u32 max_cqes;
- int max_wqes;
- u32 max_eqes;
- u32 max_indirection;
- int max_sq_desc_sz;
- int max_rq_desc_sz;
- int max_dc_sq_desc_sz;
- u64 flags;
- u16 stat_rate_support;
- int log_max_msg;
- int num_ports;
- u8 log_max_ra_res_qp;
- u8 log_max_ra_req_qp;
- int max_srq_wqes;
- int bf_reg_size;
- int bf_regs_per_page;
- struct mlx5_port_caps port[MLX5_MAX_PORTS];
- u8 ext_port_cap[MLX5_MAX_PORTS];
- int max_vf;
- u32 reserved_lkey;
- u8 local_ca_ack_delay;
- u8 log_max_mcg;
- u32 max_qp_mcg;
- int min_page_sz;
- int pd_cap;
- u32 max_qp_counters;
- u32 pkey_table_size;
- u8 log_max_ra_req_dc;
- u8 log_max_ra_res_dc;
- u32 uar_sz;
- u8 min_log_pg_sz;
- u8 log_max_xrcd;
- u16 log_uar_page_sz;
-};
-
-struct mlx5_caps {
- struct mlx5_general_caps gen;
+ u8 ext_port_cap;
};
struct mlx5_cmd_mailbox {
@@ -334,8 +290,6 @@ struct mlx5_buf_list {
struct mlx5_buf {
struct mlx5_buf_list direct;
- struct mlx5_buf_list *page_list;
- int nbufs;
int npages;
int size;
u8 page_shift;
@@ -351,7 +305,6 @@ struct mlx5_eq {
u8 eqn;
int nent;
u64 mask;
- char name[MLX5_MAX_EQ_NAME];
struct list_head list;
int index;
struct mlx5_rsc_debug *dbg;
@@ -387,6 +340,8 @@ struct mlx5_core_mr {
enum mlx5_res_type {
MLX5_RES_QP,
+ MLX5_RES_SRQ,
+ MLX5_RES_XSRQ,
};
struct mlx5_core_rsc_common {
@@ -396,6 +351,7 @@ struct mlx5_core_rsc_common {
};
struct mlx5_core_srq {
+ struct mlx5_core_rsc_common common; /* must be first */
u32 srqn;
int max;
int max_gs;
@@ -414,7 +370,6 @@ struct mlx5_eq_table {
struct mlx5_eq pages_eq;
struct mlx5_eq async_eq;
struct mlx5_eq cmd_eq;
- struct msix_entry *msix_arr;
int num_comp_vectors;
/* protect EQs list
*/
@@ -467,9 +422,16 @@ struct mlx5_mr_table {
struct radix_tree_root tree;
};
+struct mlx5_irq_info {
+ cpumask_var_t mask;
+ char name[MLX5_MAX_IRQ_NAME];
+};
+
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
+ struct msix_entry *msix_arr;
+ struct mlx5_irq_info *irq_info;
struct mlx5_uuar_info uuari;
MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
@@ -520,7 +482,9 @@ struct mlx5_core_dev {
u8 rev_id;
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
- struct mlx5_caps caps;
+ struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
+ u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
void (*event) (struct mlx5_core_dev *dev,
@@ -529,6 +493,7 @@ struct mlx5_core_dev {
struct mlx5_priv priv;
struct mlx5_profile *profile;
atomic_t num_qps;
+ u32 issi;
};
struct mlx5_db {
@@ -549,6 +514,11 @@ enum {
MLX5_COMP_EQ_SIZE = 1024,
};
+enum {
+ MLX5_PTYS_IB = 1 << 0,
+ MLX5_PTYS_EN = 1 << 2,
+};
+
struct mlx5_db_pgdir {
struct list_head list;
DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
@@ -584,13 +554,44 @@ struct mlx5_pas {
u8 log_sz;
};
+enum port_state_policy {
+ MLX5_AAA_000
+};
+
+enum phy_port_state {
+ MLX5_AAA_111
+};
+
+struct mlx5_hca_vport_context {
+ u32 field_select;
+ bool sm_virt_aware;
+ bool has_smi;
+ bool has_raw;
+ enum port_state_policy policy;
+ enum phy_port_state phys_state;
+ enum ib_port_state vport_state;
+ u8 port_physical_state;
+ u64 sys_image_guid;
+ u64 port_guid;
+ u64 node_guid;
+ u32 cap_mask1;
+ u32 cap_mask1_perm;
+ u32 cap_mask2;
+ u32 cap_mask2_perm;
+ u16 lid;
+ u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
+ u8 lmc;
+ u8 subnet_timeout;
+ u16 sm_lid;
+ u8 sm_sl;
+ u16 qkey_violation_counter;
+ u16 pkey_violation_counter;
+ bool grh_required;
+};
+
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
- if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
return buf->direct.buf + offset;
- else
- return buf->page_list[offset >> PAGE_SHIFT].buf +
- (offset & (PAGE_SIZE - 1));
}
extern struct workqueue_struct *mlx5_core_wq;
@@ -654,8 +655,8 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
- u16 opmod);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -665,19 +666,21 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(void);
void __init mlx5_health_init(void);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
- struct mlx5_buf *buf);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen);
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out);
@@ -734,7 +737,32 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
+
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask, u8 local_port);
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask);
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask);
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+ u8 *link_width_oper, u8 local_port);
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+ u8 *proto_oper, int proto_mask,
+ u8 local_port);
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask);
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+ u8 port);
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@@ -799,6 +827,7 @@ struct mlx5_interface {
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
struct mlx5_profile {
u64 mask;
@@ -809,4 +838,14 @@ struct mlx5_profile {
} mr_cache[MAX_MR_CACHE_ENTRIES];
};
+static inline int mlx5_get_gid_table_len(u16 param)
+{
+ if (param > 4) {
+ pr_warn("gid table length is zero\n");
+ return 0;
+ }
+
+ return 8 * (1 << param);
+}
+
#endif /* MLX5_DRIVER_H */
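The new port helpers compose into a simple bring-up step; a hedged sketch using only the prototypes declared above:

/* Sketch: clamp MTU to the hardware maximum, then enable the port. */
static int port_bring_up(struct mlx5_core_dev *mdev, u8 port, int mtu)
{
	int max_mtu, err;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	err = mlx5_set_port_mtu(mdev, min(mtu, max_mtu), port);
	if (err)
		return err;

	return mlx5_set_port_status(mdev, MLX5_PORT_UP);
}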
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
new file mode 100644
index 000000000000..5f922c6d4fc2
--- /dev/null
+++ b/include/linux/mlx5/flow_table.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_FLOW_TABLE_H
+#define MLX5_FLOW_TABLE_H
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_flow_table_group {
+ u8 log_sz;
+ u8 match_criteria_enable;
+ u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+ u16 num_groups,
+ struct mlx5_flow_table_group *group);
+void mlx5_destroy_flow_table(void *flow_table);
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+ void *match_criteria, void *flow_context,
+ u32 *flow_index);
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
+u32 mlx5_get_flow_table_id(void *flow_table);
+
+#endif /* MLX5_FLOW_TABLE_H */
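A consumer of the new API creates a table with one group, installs an entry, and tears everything down. A hedged sketch; sizing flow_context via the ifc layout and NULL-on-failure from mlx5_create_flow_table() are assumptions:

/* Sketch: single-group NIC RX flow table with one catch-all entry. */
static int flow_table_demo(struct mlx5_core_dev *mdev)
{
	struct mlx5_flow_table_group group = {
		.log_sz			= 4,	/* 16 entries */
		.match_criteria_enable	= MLX5_MATCH_OUTER_HEADERS,
	};
	u32 flow_context[MLX5_ST_SZ_DW(flow_context)] = {};
	u32 flow_index;
	void *ft;
	int err;

	ft = mlx5_create_flow_table(mdev, 0 /* level */,
				    MLX5_FLOW_TABLE_TYPE_NIC_RCV, 1, &group);
	if (!ft)
		return -ENOMEM;

	err = mlx5_add_flow_table_entry(ft, MLX5_MATCH_OUTER_HEADERS,
					group.match_criteria, flow_context,
					&flow_index);
	if (!err)
		mlx5_del_flow_table_entry(ft, flow_index);

	mlx5_destroy_flow_table(ft);
	return err;
}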
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index cb3ad17edd1f..6d2f6fee041c 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -28,12 +28,45 @@
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
- */
-
+*/
#ifndef MLX5_IFC_H
#define MLX5_IFC_H
enum {
+ MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0,
+ MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1,
+ MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED = 0x2,
+ MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED = 0x3,
+ MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED = 0x13,
+ MLX5_EVENT_TYPE_CODING_SRQ_LIMIT = 0x14,
+ MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED = 0x1c,
+ MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION = 0x1d,
+ MLX5_EVENT_TYPE_CODING_CQ_ERROR = 0x4,
+ MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR = 0x5,
+ MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED = 0x7,
+ MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT = 0xc,
+ MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR = 0x10,
+ MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR = 0x11,
+ MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR = 0x12,
+ MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR = 0x8,
+ MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE = 0x9,
+ MLX5_EVENT_TYPE_CODING_GPIO_EVENT = 0x15,
+ MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19,
+ MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a,
+ MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b,
+ MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f,
+ MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa,
+ MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb
+};
+
+enum {
+ MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
+ MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
+ MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
+ MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
+};
+
+enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -43,6 +76,8 @@ enum {
MLX5_CMD_OP_QUERY_PAGES = 0x107,
MLX5_CMD_OP_MANAGE_PAGES = 0x108,
MLX5_CMD_OP_SET_HCA_CAP = 0x109,
+ MLX5_CMD_OP_QUERY_ISSI = 0x10a,
+ MLX5_CMD_OP_SET_ISSI = 0x10b,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -66,6 +101,7 @@ enum {
MLX5_CMD_OP_2ERR_QP = 0x507,
MLX5_CMD_OP_2RST_QP = 0x50a,
MLX5_CMD_OP_QUERY_QP = 0x50b,
+ MLX5_CMD_OP_SQD_RTS_QP = 0x50c,
MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
MLX5_CMD_OP_CREATE_PSV = 0x600,
MLX5_CMD_OP_DESTROY_PSV = 0x601,
@@ -73,7 +109,10 @@ enum {
MLX5_CMD_OP_DESTROY_SRQ = 0x701,
MLX5_CMD_OP_QUERY_SRQ = 0x702,
MLX5_CMD_OP_ARM_RQ = 0x703,
- MLX5_CMD_OP_RESIZE_SRQ = 0x704,
+ MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706,
+ MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707,
+ MLX5_CMD_OP_ARM_XRC_SRQ = 0x708,
MLX5_CMD_OP_CREATE_DCT = 0x710,
MLX5_CMD_OP_DESTROY_DCT = 0x711,
MLX5_CMD_OP_DRAIN_DCT = 0x712,
@@ -85,8 +124,12 @@ enum {
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
- MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760,
+ MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760,
MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762,
+ MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
@@ -98,7 +141,7 @@ enum {
MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
MLX5_CMD_OP_ACCESS_REG = 0x805,
MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
- MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807,
MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
MLX5_CMD_OP_MAD_IFC = 0x50d,
MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
@@ -106,23 +149,22 @@ enum {
MLX5_CMD_OP_NOP = 0x80d,
MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
- MLX5_CMD_OP_SET_BURST_SIZE = 0x812,
- MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813,
- MLX5_CMD_OP_ACTIVATE_TRACER = 0x814,
- MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815,
- MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820,
- MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821,
- MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822,
- MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823,
- MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817,
+ MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822,
+ MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826,
+ MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828,
+ MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829,
+ MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a,
+ MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
MLX5_CMD_OP_CREATE_TIR = 0x900,
MLX5_CMD_OP_MODIFY_TIR = 0x901,
MLX5_CMD_OP_DESTROY_TIR = 0x902,
MLX5_CMD_OP_QUERY_TIR = 0x903,
- MLX5_CMD_OP_CREATE_TIS = 0x912,
- MLX5_CMD_OP_MODIFY_TIS = 0x913,
- MLX5_CMD_OP_DESTROY_TIS = 0x914,
- MLX5_CMD_OP_QUERY_TIS = 0x915,
MLX5_CMD_OP_CREATE_SQ = 0x904,
MLX5_CMD_OP_MODIFY_SQ = 0x905,
MLX5_CMD_OP_DESTROY_SQ = 0x906,
@@ -135,9 +177,430 @@ enum {
MLX5_CMD_OP_MODIFY_RMP = 0x90d,
MLX5_CMD_OP_DESTROY_RMP = 0x90e,
MLX5_CMD_OP_QUERY_RMP = 0x90f,
- MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910,
- MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911,
- MLX5_CMD_OP_MAX = 0x911
+ MLX5_CMD_OP_CREATE_TIS = 0x912,
+ MLX5_CMD_OP_MODIFY_TIS = 0x913,
+ MLX5_CMD_OP_DESTROY_TIS = 0x914,
+ MLX5_CMD_OP_QUERY_TIS = 0x915,
+ MLX5_CMD_OP_CREATE_RQT = 0x916,
+ MLX5_CMD_OP_MODIFY_RQT = 0x917,
+ MLX5_CMD_OP_DESTROY_RQT = 0x918,
+ MLX5_CMD_OP_QUERY_RQT = 0x919,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932,
+ MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934,
+ MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938
+};
+
+struct mlx5_ifc_flow_table_fields_supported_bits {
+ u8 outer_dmac[0x1];
+ u8 outer_smac[0x1];
+ u8 outer_ether_type[0x1];
+ u8 reserved_0[0x1];
+ u8 outer_first_prio[0x1];
+ u8 outer_first_cfi[0x1];
+ u8 outer_first_vid[0x1];
+ u8 reserved_1[0x1];
+ u8 outer_second_prio[0x1];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0x1];
+ u8 reserved_2[0x1];
+ u8 outer_sip[0x1];
+ u8 outer_dip[0x1];
+ u8 outer_frag[0x1];
+ u8 outer_ip_protocol[0x1];
+ u8 outer_ip_ecn[0x1];
+ u8 outer_ip_dscp[0x1];
+ u8 outer_udp_sport[0x1];
+ u8 outer_udp_dport[0x1];
+ u8 outer_tcp_sport[0x1];
+ u8 outer_tcp_dport[0x1];
+ u8 outer_tcp_flags[0x1];
+ u8 outer_gre_protocol[0x1];
+ u8 outer_gre_key[0x1];
+ u8 outer_vxlan_vni[0x1];
+ u8 reserved_3[0x5];
+ u8 source_eswitch_port[0x1];
+
+ u8 inner_dmac[0x1];
+ u8 inner_smac[0x1];
+ u8 inner_ether_type[0x1];
+ u8 reserved_4[0x1];
+ u8 inner_first_prio[0x1];
+ u8 inner_first_cfi[0x1];
+ u8 inner_first_vid[0x1];
+ u8 reserved_5[0x1];
+ u8 inner_second_prio[0x1];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0x1];
+ u8 reserved_6[0x1];
+ u8 inner_sip[0x1];
+ u8 inner_dip[0x1];
+ u8 inner_frag[0x1];
+ u8 inner_ip_protocol[0x1];
+ u8 inner_ip_ecn[0x1];
+ u8 inner_ip_dscp[0x1];
+ u8 inner_udp_sport[0x1];
+ u8 inner_udp_dport[0x1];
+ u8 inner_tcp_sport[0x1];
+ u8 inner_tcp_dport[0x1];
+ u8 inner_tcp_flags[0x1];
+ u8 reserved_7[0x9];
+
+ u8 reserved_8[0x40];
+};
+
+struct mlx5_ifc_flow_table_prop_layout_bits {
+ u8 ft_support[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x2];
+ u8 log_max_ft_size[0x6];
+ u8 reserved_2[0x10];
+ u8 max_ft_level[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x18];
+ u8 log_max_ft_num[0x8];
+
+ u8 reserved_5[0x18];
+ u8 log_max_destination[0x8];
+
+ u8 reserved_6[0x18];
+ u8 log_max_flow[0x8];
+
+ u8 reserved_7[0x40];
+
+ struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
+
+ struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
+};
+
+struct mlx5_ifc_odp_per_transport_service_cap_bits {
+ u8 send[0x1];
+ u8 receive[0x1];
+ u8 write[0x1];
+ u8 read[0x1];
+ u8 reserved_0[0x1];
+ u8 srq_receive[0x1];
+ u8 reserved_1[0x1a];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+ u8 smac_47_16[0x20];
+
+ u8 smac_15_0[0x10];
+ u8 ethertype[0x10];
+
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 first_prio[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vid[0xc];
+
+ u8 ip_protocol[0x8];
+ u8 ip_dscp[0x6];
+ u8 ip_ecn[0x2];
+ u8 vlan_tag[0x1];
+ u8 reserved_0[0x1];
+ u8 frag[0x1];
+ u8 reserved_1[0x4];
+ u8 tcp_flags[0x9];
+
+ u8 tcp_sport[0x10];
+ u8 tcp_dport[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 udp_sport[0x10];
+ u8 udp_dport[0x10];
+
+ u8 src_ip[4][0x20];
+
+ u8 dst_ip[4][0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+ u8 reserved_0[0x20];
+
+ u8 reserved_1[0x10];
+ u8 source_port[0x10];
+
+ u8 outer_second_prio[0x3];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0xc];
+ u8 inner_second_prio[0x3];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0xc];
+
+ u8 outer_second_vlan_tag[0x1];
+ u8 inner_second_vlan_tag[0x1];
+ u8 reserved_2[0xe];
+ u8 gre_protocol[0x10];
+
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+
+ u8 vxlan_vni[0x18];
+ u8 reserved_3[0x8];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0xc];
+ u8 outer_ipv6_flow_label[0x14];
+
+ u8 reserved_6[0xc];
+ u8 inner_ipv6_flow_label[0x14];
+
+ u8 reserved_7[0xe0];
+};
+
+struct mlx5_ifc_cmd_pas_bits {
+ u8 pa_h[0x20];
+
+ u8 pa_l[0x14];
+ u8 reserved_0[0xc];
+};
+
+struct mlx5_ifc_uint64_bits {
+ u8 hi[0x20];
+
+ u8 lo[0x20];
+};
+
+enum {
+ MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0,
+ MLX5_ADS_STAT_RATE_2_5GBPS = 0x7,
+ MLX5_ADS_STAT_RATE_10GBPS = 0x8,
+ MLX5_ADS_STAT_RATE_30GBPS = 0x9,
+ MLX5_ADS_STAT_RATE_5GBPS = 0xa,
+ MLX5_ADS_STAT_RATE_20GBPS = 0xb,
+ MLX5_ADS_STAT_RATE_40GBPS = 0xc,
+ MLX5_ADS_STAT_RATE_60GBPS = 0xd,
+ MLX5_ADS_STAT_RATE_80GBPS = 0xe,
+ MLX5_ADS_STAT_RATE_120GBPS = 0xf,
+};
+
+struct mlx5_ifc_ads_bits {
+ u8 fl[0x1];
+ u8 free_ar[0x1];
+ u8 reserved_0[0xe];
+ u8 pkey_index[0x10];
+
+ u8 reserved_1[0x8];
+ u8 grh[0x1];
+ u8 mlid[0x7];
+ u8 rlid[0x10];
+
+ u8 ack_timeout[0x5];
+ u8 reserved_2[0x3];
+ u8 src_addr_index[0x8];
+ u8 reserved_3[0x4];
+ u8 stat_rate[0x4];
+ u8 hop_limit[0x8];
+
+ u8 reserved_4[0x4];
+ u8 tclass[0x8];
+ u8 flow_label[0x14];
+
+ u8 rgid_rip[16][0x8];
+
+ u8 reserved_5[0x4];
+ u8 f_dscp[0x1];
+ u8 f_ecn[0x1];
+ u8 reserved_6[0x1];
+ u8 f_eth_prio[0x1];
+ u8 ecn[0x2];
+ u8 dscp[0x6];
+ u8 udp_sport[0x10];
+
+ u8 dei_cfi[0x1];
+ u8 eth_prio[0x3];
+ u8 sl[0x4];
+ u8 port[0x8];
+ u8 rmac_47_32[0x10];
+
+ u8 rmac_31_0[0x20];
+};
+
+struct mlx5_ifc_flow_table_nic_cap_bits {
+ u8 reserved_0[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
+
+ u8 reserved_1[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
+
+ u8 reserved_2[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
+
+ u8 reserved_3[0x7200];
+};
+
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+ u8 csum_cap[0x1];
+ u8 vlan_cap[0x1];
+ u8 lro_cap[0x1];
+ u8 lro_psh_flag[0x1];
+ u8 lro_time_stamp[0x1];
+ u8 reserved_0[0x6];
+ u8 max_lso_cap[0x5];
+ u8 reserved_1[0x4];
+ u8 rss_ind_tbl_cap[0x4];
+ u8 reserved_2[0x3];
+ u8 tunnel_lso_const_out_ip_id[0x1];
+ u8 reserved_3[0x2];
+ u8 tunnel_statless_gre[0x1];
+ u8 tunnel_stateless_vxlan[0x1];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x10];
+ u8 lro_min_mss_size[0x10];
+
+ u8 reserved_6[0x120];
+
+ u8 lro_timer_supported_periods[4][0x20];
+
+ u8 reserved_7[0x600];
+};
+
+struct mlx5_ifc_roce_cap_bits {
+ u8 roce_apm[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x60];
+
+ u8 reserved_2[0xc];
+ u8 l3_type[0x4];
+ u8 reserved_3[0x8];
+ u8 roce_version[0x8];
+
+ u8 reserved_4[0x10];
+ u8 r_roce_dest_udp_port[0x10];
+
+ u8 r_roce_max_src_udp_port[0x10];
+ u8 r_roce_min_src_udp_port[0x10];
+
+ u8 reserved_5[0x10];
+ u8 roce_address_table_size[0x10];
+
+ u8 reserved_6[0x700];
+};
+
+enum {
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100,
+};
+
+enum {
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100,
+};
+
+struct mlx5_ifc_atomic_caps_bits {
+ u8 reserved_0[0x40];
+
+ u8 atomic_req_endianness[0x1];
+ u8 reserved_1[0x1f];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 atomic_operations[0x10];
+
+ u8 reserved_4[0x10];
+ u8 atomic_size_qp[0x10];
+
+ u8 reserved_5[0x10];
+ u8 atomic_size_dc[0x10];
+
+ u8 reserved_6[0x720];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+ u8 reserved_0[0x40];
+
+ u8 sig[0x1];
+ u8 reserved_1[0x1f];
+
+ u8 reserved_2[0x20];
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
+
+ u8 reserved_3[0x720];
+};
+
+enum {
+ MLX5_WQ_TYPE_LINKED_LIST = 0x0,
+ MLX5_WQ_TYPE_CYCLIC = 0x1,
+ MLX5_WQ_TYPE_STRQ = 0x2,
+};
+
+enum {
+ MLX5_WQ_END_PAD_MODE_NONE = 0x0,
+ MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0,
+ MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0,
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1,
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3,
+};
+
+enum {
+ MLX5_CAP_PORT_TYPE_IB = 0x0,
+ MLX5_CAP_PORT_TYPE_ETH = 0x1,
};
struct mlx5_ifc_cmd_hca_cap_bits {
@@ -148,9 +611,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_1[0xb];
u8 log_max_qp[0x5];
- u8 log_max_strq_sz[0x8];
- u8 reserved_2[0x3];
- u8 log_max_srqs[0x5];
+ u8 reserved_2[0xb];
+ u8 log_max_srq[0x5];
u8 reserved_3[0x10];
u8 reserved_4[0x8];
@@ -185,123 +647,2112 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pad_cap[0x1];
u8 cc_query_allowed[0x1];
u8 cc_modify_allowed[0x1];
- u8 reserved_15[0x1d];
+ u8 reserved_15[0xd];
+ u8 gid_table_size[0x10];
- u8 reserved_16[0x6];
+ u8 out_of_seq_cnt[0x1];
+ u8 vport_counters[0x1];
+ u8 reserved_16[0x4];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
- u8 eswitch_owner[0x1];
- u8 reserved_17[0xa];
+ u8 vport_group_manager[0x1];
+ u8 vhca_group_manager[0x1];
+ u8 ib_virt[0x1];
+ u8 eth_virt[0x1];
+ u8 reserved_17[0x1];
+ u8 ets[0x1];
+ u8 nic_flow_table[0x1];
+ u8 reserved_18[0x4];
u8 local_ca_ack_delay[0x5];
- u8 reserved_18[0x8];
+ u8 reserved_19[0x6];
+ u8 port_type[0x2];
u8 num_ports[0x8];
- u8 reserved_19[0x3];
+ u8 reserved_20[0x3];
u8 log_max_msg[0x5];
- u8 reserved_20[0x18];
+ u8 reserved_21[0x18];
u8 stat_rate_support[0x10];
- u8 reserved_21[0x10];
+ u8 reserved_22[0xc];
+ u8 cqe_version[0x4];
- u8 reserved_22[0x10];
+ u8 compact_address_vector[0x1];
+ u8 reserved_23[0xe];
+ u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
- u8 reserved_23[0x1];
+ u8 reserved_24[0x1];
u8 wq_signature[0x1];
u8 sctr_data_cqe[0x1];
- u8 reserved_24[0x1];
+ u8 reserved_25[0x1];
u8 sho[0x1];
u8 tph[0x1];
u8 rf[0x1];
- u8 dc[0x1];
- u8 reserved_25[0x2];
+ u8 dct[0x1];
+ u8 reserved_26[0x1];
+ u8 eth_net_offloads[0x1];
u8 roce[0x1];
u8 atomic[0x1];
- u8 rsz_srq[0x1];
+ u8 reserved_27[0x1];
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
- u8 sniffer_rule_flow[0x1];
- u8 sniffer_rule_vport[0x1];
- u8 sniffer_rule_phy[0x1];
- u8 reserved_26[0x1];
+ u8 reserved_28[0x3];
+ u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
- u8 reserved_27[0x3];
+ u8 reserved_29[0x1];
+ u8 scqe_break_moderation[0x1];
+ u8 reserved_30[0x1];
u8 cd[0x1];
- u8 reserved_28[0x1];
+ u8 reserved_31[0x1];
u8 apm[0x1];
- u8 reserved_29[0x7];
+ u8 reserved_32[0x7];
u8 qkv[0x1];
u8 pkv[0x1];
- u8 reserved_30[0x4];
+ u8 reserved_33[0x4];
u8 xrc[0x1];
u8 ud[0x1];
u8 uc[0x1];
u8 rc[0x1];
- u8 reserved_31[0xa];
+ u8 reserved_34[0xa];
u8 uar_sz[0x6];
- u8 reserved_32[0x8];
+ u8 reserved_35[0x8];
u8 log_pg_sz[0x8];
u8 bf[0x1];
- u8 reserved_33[0xa];
+ u8 reserved_36[0x1];
+ u8 pad_tx_eth_packet[0x1];
+ u8 reserved_37[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_34[0x10];
+ u8 reserved_38[0x10];
- u8 reserved_35[0x10];
+ u8 reserved_39[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_36[0x10];
+ u8 reserved_40[0x10];
u8 max_wqe_sz_rq[0x10];
- u8 reserved_37[0x10];
+ u8 reserved_41[0x10];
u8 max_wqe_sz_sq_dc[0x10];
- u8 reserved_38[0x7];
+ u8 reserved_42[0x7];
u8 max_qp_mcg[0x19];
- u8 reserved_39[0x18];
+ u8 reserved_43[0x18];
u8 log_max_mcg[0x8];
- u8 reserved_40[0xb];
+ u8 reserved_44[0x3];
+ u8 log_max_transport_domain[0x5];
+ u8 reserved_45[0x3];
u8 log_max_pd[0x5];
- u8 reserved_41[0xb];
+ u8 reserved_46[0xb];
u8 log_max_xrcd[0x5];
- u8 reserved_42[0x20];
+ u8 reserved_47[0x20];
- u8 reserved_43[0x3];
+ u8 reserved_48[0x3];
u8 log_max_rq[0x5];
- u8 reserved_44[0x3];
+ u8 reserved_49[0x3];
u8 log_max_sq[0x5];
- u8 reserved_45[0x3];
+ u8 reserved_50[0x3];
u8 log_max_tir[0x5];
- u8 reserved_46[0x3];
+ u8 reserved_51[0x3];
u8 log_max_tis[0x5];
- u8 reserved_47[0x13];
- u8 log_max_rq_per_tir[0x5];
- u8 reserved_48[0x3];
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_52[0x2];
+ u8 log_max_rmp[0x5];
+ u8 reserved_53[0x3];
+ u8 log_max_rqt[0x5];
+ u8 reserved_54[0x3];
+ u8 log_max_rqt_size[0x5];
+ u8 reserved_55[0x3];
u8 log_max_tis_per_sq[0x5];
- u8 reserved_49[0xe0];
+ u8 reserved_56[0x3];
+ u8 log_max_stride_sz_rq[0x5];
+ u8 reserved_57[0x3];
+ u8 log_min_stride_sz_rq[0x5];
+ u8 reserved_58[0x3];
+ u8 log_max_stride_sz_sq[0x5];
+ u8 reserved_59[0x3];
+ u8 log_min_stride_sz_sq[0x5];
- u8 reserved_50[0x10];
+ u8 reserved_60[0x1b];
+ u8 log_max_wq_sz[0x5];
+
+ u8 reserved_61[0xa0];
+
+ u8 reserved_62[0x3];
+ u8 log_max_l2_table[0x5];
+ u8 reserved_63[0x8];
u8 log_uar_page_sz[0x10];
- u8 reserved_51[0x100];
+ u8 reserved_64[0x100];
- u8 reserved_52[0x1f];
+ u8 reserved_65[0x1f];
u8 cqe_zip[0x1];
u8 cqe_zip_timeout[0x10];
u8 cqe_zip_max_num[0x10];
- u8 reserved_53[0x220];
+ u8 reserved_66[0x220];
+};
+
+enum {
+ MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1,
+ MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2,
+};
+
+struct mlx5_ifc_dest_format_struct_bits {
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
+
+ u8 reserved_0[0x20];
+};
+
+struct mlx5_ifc_fte_match_param_bits {
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+
+ struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+
+ u8 reserved_0[0xa00];
+};
+
+enum {
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4,
+};
+
+struct mlx5_ifc_rx_hash_field_select_bits {
+ u8 l3_prot_type[0x1];
+ u8 l4_prot_type[0x1];
+ u8 selected_fields[0x1e];
+};
+
+enum {
+ MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST = 0x0,
+ MLX5_WQ_WQ_TYPE_WQ_CYCLIC = 0x1,
+};
+
+enum {
+ MLX5_WQ_END_PADDING_MODE_END_PAD_NONE = 0x0,
+ MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN = 0x1,
+};
+
+struct mlx5_ifc_wq_bits {
+ u8 wq_type[0x4];
+ u8 wq_signature[0x1];
+ u8 end_padding_mode[0x2];
+ u8 cd_slave[0x1];
+ u8 reserved_0[0x18];
+
+ u8 hds_skip_first_sge[0x1];
+ u8 log2_hds_buf_size[0x3];
+ u8 reserved_1[0x7];
+ u8 page_offset[0x5];
+ u8 lwm[0x10];
+
+ u8 reserved_2[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x8];
+ u8 uar_page[0x18];
+
+ u8 dbr_addr[0x40];
+
+ u8 hw_counter[0x20];
+
+ u8 sw_counter[0x20];
+
+ u8 reserved_4[0xc];
+ u8 log_wq_stride[0x4];
+ u8 reserved_5[0x3];
+ u8 log_wq_pg_sz[0x5];
+ u8 reserved_6[0x3];
+ u8 log_wq_sz[0x5];
+
+ u8 reserved_7[0x4e0];
+
+ struct mlx5_ifc_cmd_pas_bits pas[0];
+};
+
+struct mlx5_ifc_rq_num_bits {
+ u8 reserved_0[0x8];
+ u8 rq_num[0x18];
+};
+
+struct mlx5_ifc_mac_address_layout_bits {
+ u8 reserved_0[0x10];
+ u8 mac_addr_47_32[0x10];
+
+ u8 mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
+ u8 reserved_0[0xa0];
+
+ u8 min_time_between_cnps[0x20];
+
+ u8 reserved_1[0x12];
+ u8 cnp_dscp[0x6];
+ u8 reserved_2[0x5];
+ u8 cnp_802p_prio[0x3];
+
+ u8 reserved_3[0x720];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
+ u8 reserved_0[0x60];
+
+ u8 reserved_1[0x4];
+ u8 clamp_tgt_rate[0x1];
+ u8 reserved_2[0x3];
+ u8 clamp_tgt_rate_after_time_inc[0x1];
+ u8 reserved_3[0x17];
+
+ u8 reserved_4[0x20];
+
+ u8 rpg_time_reset[0x20];
+
+ u8 rpg_byte_reset[0x20];
+
+ u8 rpg_threshold[0x20];
+
+ u8 rpg_max_rate[0x20];
+
+ u8 rpg_ai_rate[0x20];
+
+ u8 rpg_hai_rate[0x20];
+
+ u8 rpg_gd[0x20];
+
+ u8 rpg_min_dec_fac[0x20];
+
+ u8 rpg_min_rate[0x20];
+
+ u8 reserved_5[0xe0];
+
+ u8 rate_to_set_on_first_cnp[0x20];
+
+ u8 dce_tcp_g[0x20];
+
+ u8 dce_tcp_rtt[0x20];
+
+ u8 rate_reduce_monitor_period[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 initial_alpha_value[0x20];
+
+ u8 reserved_7[0x4a0];
+};
+
+struct mlx5_ifc_cong_control_802_1qau_rp_bits {
+ u8 reserved_0[0x80];
+
+ u8 rppp_max_rps[0x20];
+
+ u8 rpg_time_reset[0x20];
+
+ u8 rpg_byte_reset[0x20];
+
+ u8 rpg_threshold[0x20];
+
+ u8 rpg_max_rate[0x20];
+
+ u8 rpg_ai_rate[0x20];
+
+ u8 rpg_hai_rate[0x20];
+
+ u8 rpg_gd[0x20];
+
+ u8 rpg_min_dec_fac[0x20];
+
+ u8 rpg_min_rate[0x20];
+
+ u8 reserved_1[0x640];
+};
+
+enum {
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1,
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2,
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4,
+};
+
+struct mlx5_ifc_resize_field_select_bits {
+ u8 resize_field_select[0x20];
+};
+
+enum {
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8,
+};
+
+struct mlx5_ifc_modify_field_select_bits {
+ u8 modify_field_select[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_np_bits {
+ u8 field_select_r_roce_np[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_rp_bits {
+ u8 field_select_r_roce_rp[0x20];
+};
+
+enum {
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800,
+};
+
+struct mlx5_ifc_field_select_802_1qau_rp_bits {
+ u8 field_select_8021qaurp[0x20];
+};
+
+struct mlx5_ifc_phys_layer_cntrs_bits {
+ u8 time_since_last_clear_high[0x20];
+
+ u8 time_since_last_clear_low[0x20];
+
+ u8 symbol_errors_high[0x20];
+
+ u8 symbol_errors_low[0x20];
+
+ u8 sync_headers_errors_high[0x20];
+
+ u8 sync_headers_errors_low[0x20];
+
+ u8 edpl_bip_errors_lane0_high[0x20];
+
+ u8 edpl_bip_errors_lane0_low[0x20];
+
+ u8 edpl_bip_errors_lane1_high[0x20];
+
+ u8 edpl_bip_errors_lane1_low[0x20];
+
+ u8 edpl_bip_errors_lane2_high[0x20];
+
+ u8 edpl_bip_errors_lane2_low[0x20];
+
+ u8 edpl_bip_errors_lane3_high[0x20];
+
+ u8 edpl_bip_errors_lane3_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane0_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane0_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane1_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane1_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane2_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane2_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane3_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane3_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane0_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane0_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane1_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane1_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane2_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane2_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane3_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane3_low[0x20];
+
+ u8 rs_fec_corrected_blocks_high[0x20];
+
+ u8 rs_fec_corrected_blocks_low[0x20];
+
+ u8 rs_fec_uncorrectable_blocks_high[0x20];
+
+ u8 rs_fec_uncorrectable_blocks_low[0x20];
+
+ u8 rs_fec_no_errors_blocks_high[0x20];
+
+ u8 rs_fec_no_errors_blocks_low[0x20];
+
+ u8 rs_fec_single_error_blocks_high[0x20];
+
+ u8 rs_fec_single_error_blocks_low[0x20];
+
+ u8 rs_fec_corrected_symbols_total_high[0x20];
+
+ u8 rs_fec_corrected_symbols_total_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane0_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane0_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane1_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane1_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane2_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane2_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane3_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane3_low[0x20];
+
+ u8 link_down_events[0x20];
+
+ u8 successful_recovery_events[0x20];
+
+ u8 reserved_0[0x180];
+};
+
+struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
+ u8 transmit_queue_high[0x20];
+
+ u8 transmit_queue_low[0x20];
+
+ u8 reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
+ u8 rx_octets_high[0x20];
+
+ u8 rx_octets_low[0x20];
+
+ u8 reserved_0[0xc0];
+
+ u8 rx_frames_high[0x20];
+
+ u8 rx_frames_low[0x20];
+
+ u8 tx_octets_high[0x20];
+
+ u8 tx_octets_low[0x20];
+
+ u8 reserved_1[0xc0];
+
+ u8 tx_frames_high[0x20];
+
+ u8 tx_frames_low[0x20];
+
+ u8 rx_pause_high[0x20];
+
+ u8 rx_pause_low[0x20];
+
+ u8 rx_pause_duration_high[0x20];
+
+ u8 rx_pause_duration_low[0x20];
+
+ u8 tx_pause_high[0x20];
+
+ u8 tx_pause_low[0x20];
+
+ u8 tx_pause_duration_high[0x20];
+
+ u8 tx_pause_duration_low[0x20];
+
+ u8 rx_pause_transition_high[0x20];
+
+ u8 rx_pause_transition_low[0x20];
+
+ u8 reserved_2[0x400];
+};
+
+struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
+ u8 port_transmit_wait_high[0x20];
+
+ u8 port_transmit_wait_low[0x20];
+
+ u8 reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
+ u8 dot3stats_alignment_errors_high[0x20];
+
+ u8 dot3stats_alignment_errors_low[0x20];
+
+ u8 dot3stats_fcs_errors_high[0x20];
+
+ u8 dot3stats_fcs_errors_low[0x20];
+
+ u8 dot3stats_single_collision_frames_high[0x20];
+
+ u8 dot3stats_single_collision_frames_low[0x20];
+
+ u8 dot3stats_multiple_collision_frames_high[0x20];
+
+ u8 dot3stats_multiple_collision_frames_low[0x20];
+
+ u8 dot3stats_sqe_test_errors_high[0x20];
+
+ u8 dot3stats_sqe_test_errors_low[0x20];
+
+ u8 dot3stats_deferred_transmissions_high[0x20];
+
+ u8 dot3stats_deferred_transmissions_low[0x20];
+
+ u8 dot3stats_late_collisions_high[0x20];
+
+ u8 dot3stats_late_collisions_low[0x20];
+
+ u8 dot3stats_excessive_collisions_high[0x20];
+
+ u8 dot3stats_excessive_collisions_low[0x20];
+
+ u8 dot3stats_internal_mac_transmit_errors_high[0x20];
+
+ u8 dot3stats_internal_mac_transmit_errors_low[0x20];
+
+ u8 dot3stats_carrier_sense_errors_high[0x20];
+
+ u8 dot3stats_carrier_sense_errors_low[0x20];
+
+ u8 dot3stats_frame_too_longs_high[0x20];
+
+ u8 dot3stats_frame_too_longs_low[0x20];
+
+ u8 dot3stats_internal_mac_receive_errors_high[0x20];
+
+ u8 dot3stats_internal_mac_receive_errors_low[0x20];
+
+ u8 dot3stats_symbol_errors_high[0x20];
+
+ u8 dot3stats_symbol_errors_low[0x20];
+
+ u8 dot3control_in_unknown_opcodes_high[0x20];
+
+ u8 dot3control_in_unknown_opcodes_low[0x20];
+
+ u8 dot3in_pause_frames_high[0x20];
+
+ u8 dot3in_pause_frames_low[0x20];
+
+ u8 dot3out_pause_frames_high[0x20];
+
+ u8 dot3out_pause_frames_low[0x20];
+
+ u8 reserved_0[0x3c0];
+};
+
+struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
+ u8 ether_stats_drop_events_high[0x20];
+
+ u8 ether_stats_drop_events_low[0x20];
+
+ u8 ether_stats_octets_high[0x20];
+
+ u8 ether_stats_octets_low[0x20];
+
+ u8 ether_stats_pkts_high[0x20];
+
+ u8 ether_stats_pkts_low[0x20];
+
+ u8 ether_stats_broadcast_pkts_high[0x20];
+
+ u8 ether_stats_broadcast_pkts_low[0x20];
+
+ u8 ether_stats_multicast_pkts_high[0x20];
+
+ u8 ether_stats_multicast_pkts_low[0x20];
+
+ u8 ether_stats_crc_align_errors_high[0x20];
+
+ u8 ether_stats_crc_align_errors_low[0x20];
+
+ u8 ether_stats_undersize_pkts_high[0x20];
+
+ u8 ether_stats_undersize_pkts_low[0x20];
+
+ u8 ether_stats_oversize_pkts_high[0x20];
+
+ u8 ether_stats_oversize_pkts_low[0x20];
+
+ u8 ether_stats_fragments_high[0x20];
+
+ u8 ether_stats_fragments_low[0x20];
+
+ u8 ether_stats_jabbers_high[0x20];
+
+ u8 ether_stats_jabbers_low[0x20];
+
+ u8 ether_stats_collisions_high[0x20];
+
+ u8 ether_stats_collisions_low[0x20];
+
+ u8 ether_stats_pkts64octets_high[0x20];
+
+ u8 ether_stats_pkts64octets_low[0x20];
+
+ u8 ether_stats_pkts65to127octets_high[0x20];
+
+ u8 ether_stats_pkts65to127octets_low[0x20];
+
+ u8 ether_stats_pkts128to255octets_high[0x20];
+
+ u8 ether_stats_pkts128to255octets_low[0x20];
+
+ u8 ether_stats_pkts256to511octets_high[0x20];
+
+ u8 ether_stats_pkts256to511octets_low[0x20];
+
+ u8 ether_stats_pkts512to1023octets_high[0x20];
+
+ u8 ether_stats_pkts512to1023octets_low[0x20];
+
+ u8 ether_stats_pkts1024to1518octets_high[0x20];
+
+ u8 ether_stats_pkts1024to1518octets_low[0x20];
+
+ u8 ether_stats_pkts1519to2047octets_high[0x20];
+
+ u8 ether_stats_pkts1519to2047octets_low[0x20];
+
+ u8 ether_stats_pkts2048to4095octets_high[0x20];
+
+ u8 ether_stats_pkts2048to4095octets_low[0x20];
+
+ u8 ether_stats_pkts4096to8191octets_high[0x20];
+
+ u8 ether_stats_pkts4096to8191octets_low[0x20];
+
+ u8 ether_stats_pkts8192to10239octets_high[0x20];
+
+ u8 ether_stats_pkts8192to10239octets_low[0x20];
+
+ u8 reserved_0[0x280];
+};
+
+struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
+ u8 if_in_octets_high[0x20];
+
+ u8 if_in_octets_low[0x20];
+
+ u8 if_in_ucast_pkts_high[0x20];
+
+ u8 if_in_ucast_pkts_low[0x20];
+
+ u8 if_in_discards_high[0x20];
+
+ u8 if_in_discards_low[0x20];
+
+ u8 if_in_errors_high[0x20];
+
+ u8 if_in_errors_low[0x20];
+
+ u8 if_in_unknown_protos_high[0x20];
+
+ u8 if_in_unknown_protos_low[0x20];
+
+ u8 if_out_octets_high[0x20];
+
+ u8 if_out_octets_low[0x20];
+
+ u8 if_out_ucast_pkts_high[0x20];
+
+ u8 if_out_ucast_pkts_low[0x20];
+
+ u8 if_out_discards_high[0x20];
+
+ u8 if_out_discards_low[0x20];
+
+ u8 if_out_errors_high[0x20];
+
+ u8 if_out_errors_low[0x20];
+
+ u8 if_in_multicast_pkts_high[0x20];
+
+ u8 if_in_multicast_pkts_low[0x20];
+
+ u8 if_in_broadcast_pkts_high[0x20];
+
+ u8 if_in_broadcast_pkts_low[0x20];
+
+ u8 if_out_multicast_pkts_high[0x20];
+
+ u8 if_out_multicast_pkts_low[0x20];
+
+ u8 if_out_broadcast_pkts_high[0x20];
+
+ u8 if_out_broadcast_pkts_low[0x20];
+
+ u8 reserved_0[0x480];
+};
+
+struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
+ u8 a_frames_transmitted_ok_high[0x20];
+
+ u8 a_frames_transmitted_ok_low[0x20];
+
+ u8 a_frames_received_ok_high[0x20];
+
+ u8 a_frames_received_ok_low[0x20];
+
+ u8 a_frame_check_sequence_errors_high[0x20];
+
+ u8 a_frame_check_sequence_errors_low[0x20];
+
+ u8 a_alignment_errors_high[0x20];
+
+ u8 a_alignment_errors_low[0x20];
+
+ u8 a_octets_transmitted_ok_high[0x20];
+
+ u8 a_octets_transmitted_ok_low[0x20];
+
+ u8 a_octets_received_ok_high[0x20];
+
+ u8 a_octets_received_ok_low[0x20];
+
+ u8 a_multicast_frames_xmitted_ok_high[0x20];
+
+ u8 a_multicast_frames_xmitted_ok_low[0x20];
+
+ u8 a_broadcast_frames_xmitted_ok_high[0x20];
+
+ u8 a_broadcast_frames_xmitted_ok_low[0x20];
+
+ u8 a_multicast_frames_received_ok_high[0x20];
+
+ u8 a_multicast_frames_received_ok_low[0x20];
+
+ u8 a_broadcast_frames_received_ok_high[0x20];
+
+ u8 a_broadcast_frames_received_ok_low[0x20];
+
+ u8 a_in_range_length_errors_high[0x20];
+
+ u8 a_in_range_length_errors_low[0x20];
+
+ u8 a_out_of_range_length_field_high[0x20];
+
+ u8 a_out_of_range_length_field_low[0x20];
+
+ u8 a_frame_too_long_errors_high[0x20];
+
+ u8 a_frame_too_long_errors_low[0x20];
+
+ u8 a_symbol_error_during_carrier_high[0x20];
+
+ u8 a_symbol_error_during_carrier_low[0x20];
+
+ u8 a_mac_control_frames_transmitted_high[0x20];
+
+ u8 a_mac_control_frames_transmitted_low[0x20];
+
+ u8 a_mac_control_frames_received_high[0x20];
+
+ u8 a_mac_control_frames_received_low[0x20];
+
+ u8 a_unsupported_opcodes_received_high[0x20];
+
+ u8 a_unsupported_opcodes_received_low[0x20];
+
+ u8 a_pause_mac_ctrl_frames_received_high[0x20];
+
+ u8 a_pause_mac_ctrl_frames_received_low[0x20];
+
+ u8 a_pause_mac_ctrl_frames_transmitted_high[0x20];
+
+ u8 a_pause_mac_ctrl_frames_transmitted_low[0x20];
+
+ u8 reserved_0[0x300];
+};
+
+struct mlx5_ifc_cmd_inter_comp_event_bits {
+ u8 command_completion_vector[0x20];
+
+ u8 reserved_0[0xc0];
+};
+
+struct mlx5_ifc_stall_vl_event_bits {
+ u8 reserved_0[0x18];
+ u8 port_num[0x1];
+ u8 reserved_1[0x3];
+ u8 vl[0x4];
+
+ u8 reserved_2[0xa0];
+};
+
+struct mlx5_ifc_db_bf_congestion_event_bits {
+ u8 event_subtype[0x8];
+ u8 reserved_0[0x8];
+ u8 congestion_level[0x8];
+ u8 reserved_1[0x8];
+
+ u8 reserved_2[0xa0];
+};
+
+struct mlx5_ifc_gpio_event_bits {
+ u8 reserved_0[0x60];
+
+ u8 gpio_event_hi[0x20];
+
+ u8 gpio_event_lo[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_port_state_change_event_bits {
+ u8 reserved_0[0x40];
+
+ u8 port_num[0x4];
+ u8 reserved_1[0x1c];
+
+ u8 reserved_2[0x80];
+};
+
+struct mlx5_ifc_dropped_packet_logged_bits {
+ u8 reserved_0[0xe0];
+};
+
+enum {
+ MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
+ MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
+};
+
+struct mlx5_ifc_cq_error_bits {
+ u8 reserved_0[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x18];
+ u8 syndrome[0x8];
+
+ u8 reserved_3[0x80];
+};
+
+struct mlx5_ifc_rdma_page_fault_event_bits {
+ u8 bytes_committed[0x20];
+
+ u8 r_key[0x20];
+
+ u8 reserved_0[0x10];
+ u8 packet_len[0x10];
+
+ u8 rdma_op_len[0x20];
+
+ u8 rdma_va[0x40];
+
+ u8 reserved_1[0x5];
+ u8 rdma[0x1];
+ u8 write[0x1];
+ u8 requestor[0x1];
+ u8 qp_number[0x18];
+};
+
+struct mlx5_ifc_wqe_associated_page_fault_event_bits {
+ u8 bytes_committed[0x20];
+
+ u8 reserved_0[0x10];
+ u8 wqe_index[0x10];
+
+ u8 reserved_1[0x10];
+ u8 len[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x5];
+ u8 rdma[0x1];
+ u8 write_read[0x1];
+ u8 requestor[0x1];
+ u8 qpn[0x18];
+};
+
+struct mlx5_ifc_qp_events_bits {
+ u8 reserved_0[0xa0];
+
+ u8 type[0x8];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x8];
+ u8 qpn_rqn_sqn[0x18];
+};
+
+struct mlx5_ifc_dct_events_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 dct_number[0x18];
+};
+
+struct mlx5_ifc_comp_event_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 cq_number[0x18];
+};
+
+enum {
+ MLX5_QPC_STATE_RST = 0x0,
+ MLX5_QPC_STATE_INIT = 0x1,
+ MLX5_QPC_STATE_RTR = 0x2,
+ MLX5_QPC_STATE_RTS = 0x3,
+ MLX5_QPC_STATE_SQER = 0x4,
+ MLX5_QPC_STATE_ERR = 0x6,
+ MLX5_QPC_STATE_SQD = 0x7,
+ MLX5_QPC_STATE_SUSPENDED = 0x9,
+};
+
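+/*
+ * Editor's note (illustrative): these encodings follow the standard IB QP
+ * state machine but diverge from enum ib_qp_state after RTS (ib_verbs.h
+ * has SQD = 4, SQE = 5, ERR = 6), so mlx5_ib translates through a lookup
+ * (to_mlx5_state() in drivers/infiniband/hw/mlx5/qp.c) instead of casting
+ * verbs states straight into the qpc state field.
+ */
+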
+enum {
+ MLX5_QPC_ST_RC = 0x0,
+ MLX5_QPC_ST_UC = 0x1,
+ MLX5_QPC_ST_UD = 0x2,
+ MLX5_QPC_ST_XRC = 0x3,
+ MLX5_QPC_ST_DCI = 0x5,
+ MLX5_QPC_ST_QP0 = 0x7,
+ MLX5_QPC_ST_QP1 = 0x8,
+ MLX5_QPC_ST_RAW_DATAGRAM = 0x9,
+ MLX5_QPC_ST_REG_UMR = 0xc,
+};
+
+enum {
+ MLX5_QPC_PM_STATE_ARMED = 0x0,
+ MLX5_QPC_PM_STATE_REARM = 0x1,
+ MLX5_QPC_PM_STATE_RESERVED = 0x2,
+ MLX5_QPC_PM_STATE_MIGRATED = 0x3,
+};
+
+enum {
+ MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0,
+ MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1,
+};
+
+enum {
+ MLX5_QPC_MTU_256_BYTES = 0x1,
+ MLX5_QPC_MTU_512_BYTES = 0x2,
+ MLX5_QPC_MTU_1K_BYTES = 0x3,
+ MLX5_QPC_MTU_2K_BYTES = 0x4,
+ MLX5_QPC_MTU_4K_BYTES = 0x5,
+ MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7,
+};
+
+enum {
+ MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1,
+ MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8,
+};
+
+enum {
+ MLX5_QPC_CS_REQ_DISABLE = 0x0,
+ MLX5_QPC_CS_REQ_UP_TO_32B = 0x11,
+ MLX5_QPC_CS_REQ_UP_TO_64B = 0x22,
+};
+
+enum {
+ MLX5_QPC_CS_RES_DISABLE = 0x0,
+ MLX5_QPC_CS_RES_UP_TO_32B = 0x1,
+ MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
+};
+
+struct mlx5_ifc_qpc_bits {
+ u8 state[0x4];
+ u8 reserved_0[0x4];
+ u8 st[0x8];
+ u8 reserved_1[0x3];
+ u8 pm_state[0x2];
+ u8 reserved_2[0x7];
+ u8 end_padding_mode[0x2];
+ u8 reserved_3[0x2];
+
+ u8 wq_signature[0x1];
+ u8 block_lb_mc[0x1];
+ u8 atomic_like_write_en[0x1];
+ u8 latency_sensitive[0x1];
+ u8 reserved_4[0x1];
+ u8 drain_sigerr[0x1];
+ u8 reserved_5[0x2];
+ u8 pd[0x18];
+
+ u8 mtu[0x3];
+ u8 log_msg_max[0x5];
+ u8 reserved_6[0x1];
+ u8 log_rq_size[0x4];
+ u8 log_rq_stride[0x3];
+ u8 no_sq[0x1];
+ u8 log_sq_size[0x4];
+ u8 reserved_7[0x6];
+ u8 rlky[0x1];
+ u8 reserved_8[0x4];
+
+ u8 counter_set_id[0x8];
+ u8 uar_page[0x18];
+
+ u8 reserved_9[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_10[0x3];
+ u8 log_page_size[0x5];
+ u8 remote_qpn[0x18];
+
+ struct mlx5_ifc_ads_bits primary_address_path;
+
+ struct mlx5_ifc_ads_bits secondary_address_path;
+
+ u8 log_ack_req_freq[0x4];
+ u8 reserved_11[0x4];
+ u8 log_sra_max[0x3];
+ u8 reserved_12[0x2];
+ u8 retry_count[0x3];
+ u8 rnr_retry[0x3];
+ u8 reserved_13[0x1];
+ u8 fre[0x1];
+ u8 cur_rnr_retry[0x3];
+ u8 cur_retry_count[0x3];
+ u8 reserved_14[0x5];
+
+ u8 reserved_15[0x20];
+
+ u8 reserved_16[0x8];
+ u8 next_send_psn[0x18];
+
+ u8 reserved_17[0x8];
+ u8 cqn_snd[0x18];
+
+ u8 reserved_18[0x40];
+
+ u8 reserved_19[0x8];
+ u8 last_acked_psn[0x18];
+
+ u8 reserved_20[0x8];
+ u8 ssn[0x18];
+
+ u8 reserved_21[0x8];
+ u8 log_rra_max[0x3];
+ u8 reserved_22[0x1];
+ u8 atomic_mode[0x4];
+ u8 rre[0x1];
+ u8 rwe[0x1];
+ u8 rae[0x1];
+ u8 reserved_23[0x1];
+ u8 page_offset[0x6];
+ u8 reserved_24[0x3];
+ u8 cd_slave_receive[0x1];
+ u8 cd_slave_send[0x1];
+ u8 cd_master[0x1];
+
+ u8 reserved_25[0x3];
+ u8 min_rnr_nak[0x5];
+ u8 next_rcv_psn[0x18];
+
+ u8 reserved_26[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_27[0x8];
+ u8 cqn_rcv[0x18];
+
+ u8 dbr_addr[0x40];
+
+ u8 q_key[0x20];
+
+ u8 reserved_28[0x5];
+ u8 rq_type[0x3];
+ u8 srqn_rmpn[0x18];
+
+ u8 reserved_29[0x8];
+ u8 rmsn[0x18];
+
+ u8 hw_sq_wqebb_counter[0x10];
+ u8 sw_sq_wqebb_counter[0x10];
+
+ u8 hw_rq_counter[0x20];
+
+ u8 sw_rq_counter[0x20];
+
+ u8 reserved_30[0x20];
+
+ u8 reserved_31[0xf];
+ u8 cgs[0x1];
+ u8 cs_req[0x8];
+ u8 cs_res[0x8];
+
+ u8 dc_access_key[0x40];
+
+ u8 reserved_32[0xc0];
+};
+
+struct mlx5_ifc_roce_addr_layout_bits {
+ u8 source_l3_address[16][0x8];
+
+ u8 reserved_0[0x3];
+ u8 vlan_valid[0x1];
+ u8 vlan_id[0xc];
+ u8 source_mac_47_32[0x10];
+
+ u8 source_mac_31_0[0x20];
+
+ u8 reserved_1[0x14];
+ u8 roce_l3_type[0x4];
+ u8 roce_version[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+union mlx5_ifc_hca_cap_union_bits {
+ struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_odp_cap_bits odp_cap;
+ struct mlx5_ifc_atomic_caps_bits atomic_caps;
+ struct mlx5_ifc_roce_cap_bits roce_cap;
+ struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
+ struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
+ u8 reserved_0[0x8000];
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
+};
+
+struct mlx5_ifc_flow_context_bits {
+ u8 reserved_0[0x20];
+
+ u8 group_id[0x20];
+
+ u8 reserved_1[0x8];
+ u8 flow_tag[0x18];
+
+ u8 reserved_2[0x10];
+ u8 action[0x10];
+
+ u8 reserved_3[0x8];
+ u8 destination_list_size[0x18];
+
+ u8 reserved_4[0x160];
+
+ struct mlx5_ifc_fte_match_param_bits match_value;
+
+ u8 reserved_5[0x600];
+
+ struct mlx5_ifc_dest_format_struct_bits destination[0];
+};
+
+enum {
+ MLX5_XRC_SRQC_STATE_GOOD = 0x0,
+ MLX5_XRC_SRQC_STATE_ERROR = 0x1,
+};
+
+struct mlx5_ifc_xrc_srqc_bits {
+ u8 state[0x4];
+ u8 log_xrc_srq_size[0x4];
+ u8 reserved_0[0x18];
+
+ u8 wq_signature[0x1];
+ u8 cont_srq[0x1];
+ u8 reserved_1[0x1];
+ u8 rlky[0x1];
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 log_rq_stride[0x3];
+ u8 xrcd[0x18];
+
+ u8 page_offset[0x6];
+ u8 reserved_2[0x2];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 user_index_equal_xrc_srqn[0x1];
+ u8 reserved_4[0x1];
+ u8 log_page_size[0x6];
+ u8 user_index[0x18];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x8];
+ u8 pd[0x18];
+
+ u8 lwm[0x10];
+ u8 wqe_cnt[0x10];
+
+ u8 reserved_7[0x40];
+
+ u8 db_record_addr_h[0x20];
+
+ u8 db_record_addr_l[0x1e];
+ u8 reserved_8[0x2];
+
+ u8 reserved_9[0x80];
+};
+
+struct mlx5_ifc_traffic_counter_bits {
+ u8 packets[0x40];
+
+ u8 octets[0x40];
+};
+
+struct mlx5_ifc_tisc_bits {
+ u8 reserved_0[0xc];
+ u8 prio[0x4];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x100];
+
+ u8 reserved_3[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_4[0x3c0];
+};
+
+enum {
+ MLX5_TIRC_DISP_TYPE_DIRECT = 0x0,
+ MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1,
+};
+
+enum {
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+};
+
+enum {
+ MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
+ MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
+ MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
+};
+
+enum {
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2,
+};
+
+struct mlx5_ifc_tirc_bits {
+ u8 reserved_0[0x20];
+
+ u8 disp_type[0x4];
+ u8 reserved_1[0x1c];
+
+ u8 reserved_2[0x40];
+
+ u8 reserved_3[0x4];
+ u8 lro_timeout_period_usecs[0x10];
+ u8 lro_enable_mask[0x4];
+ u8 lro_max_ip_payload_size[0x8];
+
+ u8 reserved_4[0x40];
+
+ u8 reserved_5[0x8];
+ u8 inline_rqn[0x18];
+
+ u8 rx_hash_symmetric[0x1];
+ u8 reserved_6[0x1];
+ u8 tunneled_offload_en[0x1];
+ u8 reserved_7[0x5];
+ u8 indirect_table[0x18];
+
+ u8 rx_hash_fn[0x4];
+ u8 reserved_8[0x2];
+ u8 self_lb_block[0x2];
+ u8 transport_domain[0x18];
+
+ u8 rx_hash_toeplitz_key[10][0x20];
+
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+
+ u8 reserved_9[0x4c0];
+};
+
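+/*
+ * Editor's note (illustrative): a TIR with MLX5_TIRC_DISP_TYPE_INDIRECT
+ * spreads received packets across the RQT named by indirect_table, using
+ * rx_hash_fn over the header fields chosen in the rx_hash_field_selector
+ * members; the 10x32-bit rx_hash_toeplitz_key above is the usual 40-byte
+ * RSS Toeplitz key.
+ */
+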
+enum {
+ MLX5_SRQC_STATE_GOOD = 0x0,
+ MLX5_SRQC_STATE_ERROR = 0x1,
+};
+
+struct mlx5_ifc_srqc_bits {
+ u8 state[0x4];
+ u8 log_srq_size[0x4];
+ u8 reserved_0[0x18];
+
+ u8 wq_signature[0x1];
+ u8 cont_srq[0x1];
+ u8 reserved_1[0x1];
+ u8 rlky[0x1];
+ u8 reserved_2[0x1];
+ u8 log_rq_stride[0x3];
+ u8 xrcd[0x18];
+
+ u8 page_offset[0x6];
+ u8 reserved_3[0x2];
+ u8 cqn[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x2];
+ u8 log_page_size[0x6];
+ u8 reserved_6[0x18];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x8];
+ u8 pd[0x18];
+
+ u8 lwm[0x10];
+ u8 wqe_cnt[0x10];
+
+ u8 reserved_9[0x40];
+
+ u8 dbr_addr[0x40];
+
+ u8 reserved_10[0x80];
+};
+
+enum {
+ MLX5_SQC_STATE_RST = 0x0,
+ MLX5_SQC_STATE_RDY = 0x1,
+ MLX5_SQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+ u8 rlky[0x1];
+ u8 cd_master[0x1];
+ u8 fre[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 reserved_0[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x14];
+
+ u8 reserved_2[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_4[0xa0];
+
+ u8 tis_lst_sz[0x10];
+ u8 reserved_5[0x10];
+
+ u8 reserved_6[0x40];
+
+ u8 reserved_7[0x8];
+ u8 tis_num_0[0x18];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+struct mlx5_ifc_rqtc_bits {
+ u8 reserved_0[0xa0];
+
+ u8 reserved_1[0x10];
+ u8 rqt_max_size[0x10];
+
+ u8 reserved_2[0x10];
+ u8 rqt_actual_size[0x10];
+
+ u8 reserved_3[0x6a0];
+
+ struct mlx5_ifc_rq_num_bits rq_num[0];
+};
+
+enum {
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1,
+};
+
+enum {
+ MLX5_RQC_STATE_RST = 0x0,
+ MLX5_RQC_STATE_RDY = 0x1,
+ MLX5_RQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rqc_bits {
+ u8 rlky[0x1];
+ u8 reserved_0[0x2];
+ u8 vsd[0x1];
+ u8 mem_rq_type[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 reserved_2[0x12];
+
+ u8 reserved_3[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_4[0x8];
+ u8 cqn[0x18];
+
+ u8 counter_set_id[0x8];
+ u8 reserved_5[0x18];
+
+ u8 reserved_6[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_7[0xe0];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+ MLX5_RMPC_STATE_RDY = 0x1,
+ MLX5_RMPC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rmpc_bits {
+ u8 reserved_0[0x8];
+ u8 state[0x4];
+ u8 reserved_1[0x14];
+
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_2[0x1f];
+
+ u8 reserved_3[0x140];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+ MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0,
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+ u8 reserved_0[0x1f];
+ u8 roce_en[0x1];
+
+ u8 reserved_1[0x760];
+
+ u8 reserved_2[0x5];
+ u8 allowed_list_type[0x3];
+ u8 reserved_3[0xc];
+ u8 allowed_list_size[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits permanent_address;
+
+ u8 reserved_4[0x20];
+
+ u8 current_uc_mac_address[0][0x40];
+};
+
+enum {
+ MLX5_MKC_ACCESS_MODE_PA = 0x0,
+ MLX5_MKC_ACCESS_MODE_MTT = 0x1,
+ MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
+};
+
+struct mlx5_ifc_mkc_bits {
+ u8 reserved_0[0x1];
+ u8 free[0x1];
+ u8 reserved_1[0xd];
+ u8 small_fence_on_rdma_read_response[0x1];
+ u8 umr_en[0x1];
+ u8 a[0x1];
+ u8 rw[0x1];
+ u8 rr[0x1];
+ u8 lw[0x1];
+ u8 lr[0x1];
+ u8 access_mode[0x2];
+ u8 reserved_2[0x8];
+
+ u8 qpn[0x18];
+ u8 mkey_7_0[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 length64[0x1];
+ u8 bsf_en[0x1];
+ u8 sync_umr[0x1];
+ u8 reserved_4[0x2];
+ u8 expected_sigerr_count[0x1];
+ u8 reserved_5[0x1];
+ u8 en_rinval[0x1];
+ u8 pd[0x18];
+
+ u8 start_addr[0x40];
+
+ u8 len[0x40];
+
+ u8 bsf_octword_size[0x20];
+
+ u8 reserved_6[0x80];
+
+ u8 translations_octword_size[0x20];
+
+ u8 reserved_7[0x1b];
+ u8 log_page_size[0x5];
+
+ u8 reserved_8[0x20];
+};
+
+struct mlx5_ifc_pkey_bits {
+ u8 reserved_0[0x10];
+ u8 pkey[0x10];
+};
+
+struct mlx5_ifc_array128_auto_bits {
+ u8 array128_auto[16][0x8];
+};
+
+struct mlx5_ifc_hca_vport_context_bits {
+ u8 field_select[0x20];
+
+ u8 reserved_0[0xe0];
+
+ u8 sm_virt_aware[0x1];
+ u8 has_smi[0x1];
+ u8 has_raw[0x1];
+ u8 grh_required[0x1];
+ u8 reserved_1[0xc];
+ u8 port_physical_state[0x4];
+ u8 vport_state_policy[0x4];
+ u8 port_state[0x4];
+ u8 vport_state[0x4];
+
+ u8 reserved_2[0x20];
+
+ u8 system_image_guid[0x40];
+
+ u8 port_guid[0x40];
+
+ u8 node_guid[0x40];
+
+ u8 cap_mask1[0x20];
+
+ u8 cap_mask1_field_select[0x20];
+
+ u8 cap_mask2[0x20];
+
+ u8 cap_mask2_field_select[0x20];
+
+ u8 reserved_3[0x80];
+
+ u8 lid[0x10];
+ u8 reserved_4[0x4];
+ u8 init_type_reply[0x4];
+ u8 lmc[0x3];
+ u8 subnet_timeout[0x5];
+
+ u8 sm_lid[0x10];
+ u8 sm_sl[0x4];
+ u8 reserved_5[0xc];
+
+ u8 qkey_violation_counter[0x10];
+ u8 pkey_violation_counter[0x10];
+
+ u8 reserved_6[0xca0];
+};
+
+enum {
+ MLX5_EQC_STATUS_OK = 0x0,
+ MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa,
+};
+
+enum {
+ MLX5_EQC_ST_ARMED = 0x9,
+ MLX5_EQC_ST_FIRED = 0xa,
+};
+
+struct mlx5_ifc_eqc_bits {
+ u8 status[0x4];
+ u8 reserved_0[0x9];
+ u8 ec[0x1];
+ u8 oi[0x1];
+ u8 reserved_1[0x5];
+ u8 st[0x4];
+ u8 reserved_2[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_5[0x6];
+
+ u8 reserved_6[0x3];
+ u8 log_eq_size[0x5];
+ u8 uar_page[0x18];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x18];
+ u8 intr[0x8];
+
+ u8 reserved_9[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_10[0x18];
+
+ u8 reserved_11[0x60];
+
+ u8 reserved_12[0x8];
+ u8 consumer_counter[0x18];
+
+ u8 reserved_13[0x8];
+ u8 producer_counter[0x18];
+
+ u8 reserved_14[0x80];
+};
+
+enum {
+ MLX5_DCTC_STATE_ACTIVE = 0x0,
+ MLX5_DCTC_STATE_DRAINING = 0x1,
+ MLX5_DCTC_STATE_DRAINED = 0x2,
+};
+
+enum {
+ MLX5_DCTC_CS_RES_DISABLE = 0x0,
+ MLX5_DCTC_CS_RES_NA = 0x1,
+ MLX5_DCTC_CS_RES_UP_TO_64B = 0x2,
+};
+
+enum {
+ MLX5_DCTC_MTU_256_BYTES = 0x1,
+ MLX5_DCTC_MTU_512_BYTES = 0x2,
+ MLX5_DCTC_MTU_1K_BYTES = 0x3,
+ MLX5_DCTC_MTU_2K_BYTES = 0x4,
+ MLX5_DCTC_MTU_4K_BYTES = 0x5,
+};
+
+struct mlx5_ifc_dctc_bits {
+ u8 reserved_0[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 cqn[0x18];
+
+ u8 counter_set_id[0x8];
+ u8 atomic_mode[0x4];
+ u8 rre[0x1];
+ u8 rwe[0x1];
+ u8 rae[0x1];
+ u8 atomic_like_write_en[0x1];
+ u8 latency_sensitive[0x1];
+ u8 rlky[0x1];
+ u8 free_ar[0x1];
+ u8 reserved_4[0xd];
+
+ u8 reserved_5[0x8];
+ u8 cs_res[0x8];
+ u8 reserved_6[0x3];
+ u8 min_rnr_nak[0x5];
+ u8 reserved_7[0x8];
+
+ u8 reserved_8[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_9[0x8];
+ u8 pd[0x18];
+
+ u8 tclass[0x8];
+ u8 reserved_10[0x4];
+ u8 flow_label[0x14];
+
+ u8 dc_access_key[0x40];
+
+ u8 reserved_11[0x5];
+ u8 mtu[0x3];
+ u8 port[0x8];
+ u8 pkey_index[0x10];
+
+ u8 reserved_12[0x8];
+ u8 my_addr_index[0x8];
+ u8 reserved_13[0x8];
+ u8 hop_limit[0x8];
+
+ u8 dc_access_key_violation_count[0x20];
+
+ u8 reserved_14[0x14];
+ u8 dei_cfi[0x1];
+ u8 eth_prio[0x3];
+ u8 ecn[0x2];
+ u8 dscp[0x6];
+
+ u8 reserved_15[0x40];
+};
+
+enum {
+ MLX5_CQC_STATUS_OK = 0x0,
+ MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9,
+ MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa,
+};
+
+enum {
+ MLX5_CQC_CQE_SZ_64_BYTES = 0x0,
+ MLX5_CQC_CQE_SZ_128_BYTES = 0x1,
+};
+
+enum {
+ MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED = 0x6,
+ MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED = 0x9,
+ MLX5_CQC_ST_FIRED = 0xa,
+};
+
+struct mlx5_ifc_cqc_bits {
+ u8 status[0x4];
+ u8 reserved_0[0x4];
+ u8 cqe_sz[0x3];
+ u8 cc[0x1];
+ u8 reserved_1[0x1];
+ u8 scqe_break_moderation_en[0x1];
+ u8 oi[0x1];
+ u8 reserved_2[0x2];
+ u8 cqe_zip_en[0x1];
+ u8 mini_cqe_res_format[0x2];
+ u8 st[0x4];
+ u8 reserved_3[0x8];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_6[0x6];
+
+ u8 reserved_7[0x3];
+ u8 log_cq_size[0x5];
+ u8 uar_page[0x18];
+
+ u8 reserved_8[0x4];
+ u8 cq_period[0xc];
+ u8 cq_max_count[0x10];
+
+ u8 reserved_9[0x18];
+ u8 c_eqn[0x8];
+
+ u8 reserved_10[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_11[0x18];
+
+ u8 reserved_12[0x20];
+
+ u8 reserved_13[0x8];
+ u8 last_notified_index[0x18];
+
+ u8 reserved_14[0x8];
+ u8 last_solicit_index[0x18];
+
+ u8 reserved_15[0x8];
+ u8 consumer_counter[0x18];
+
+ u8 reserved_16[0x8];
+ u8 producer_counter[0x18];
+
+ u8 reserved_17[0x40];
+
+ u8 dbr_addr[0x40];
+};
+
+union mlx5_ifc_cong_control_roce_ecn_auto_bits {
+ struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
+ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
+ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+ u8 reserved_0[0x800];
+};
+
+struct mlx5_ifc_query_adapter_param_block_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 ieee_vendor_id[0x18];
+
+ u8 reserved_2[0x10];
+ u8 vsd_vendor_id[0x10];
+
+ u8 vsd[208][0x8];
+
+ u8 vsd_contd_psid[16][0x8];
+};
+
+union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
+ struct mlx5_ifc_modify_field_select_bits modify_field_select;
+ struct mlx5_ifc_resize_field_select_bits resize_field_select;
+ u8 reserved_0[0x20];
+};
+
+union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
+ struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
+ struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
+ struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
+ u8 reserved_0[0x20];
+};
+
+union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
+ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ u8 reserved_0[0x7c0];
+};
+
+union mlx5_ifc_event_auto_bits {
+ struct mlx5_ifc_comp_event_bits comp_event;
+ struct mlx5_ifc_dct_events_bits dct_events;
+ struct mlx5_ifc_qp_events_bits qp_events;
+ struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
+ struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
+ struct mlx5_ifc_cq_error_bits cq_error;
+ struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
+ struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
+ struct mlx5_ifc_gpio_event_bits gpio_event;
+ struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
+ struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
+ struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
+ u8 reserved_0[0xe0];
+};
+
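+/*
+ * Editor's note (illustrative): each member of this union is the 28-byte
+ * (0xe0-bit) event-specific payload of an EQE; the EQE type byte selects
+ * which of these layouts applies when the EQ is polled.
+ */
+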
+struct mlx5_ifc_health_buffer_bits {
+ u8 reserved_0[0x100];
+
+ u8 assert_existptr[0x20];
+
+ u8 assert_callra[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 fw_version[0x20];
+
+ u8 hw_id[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 irisc_index[0x8];
+ u8 synd[0x8];
+ u8 ext_synd[0x10];
+};
+
+struct mlx5_ifc_register_loopback_control_bits {
+ u8 no_lb[0x1];
+ u8 reserved_0[0x7];
+ u8 port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_teardown_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1,
+};
+
+struct mlx5_ifc_teardown_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 profile[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_sqd2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqd2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_set_roce_address_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_roce_address_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 roce_address_index[0x10];
+ u8 reserved_2[0x10];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_set_mad_demux_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0,
+ MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2,
+};
+
+struct mlx5_ifc_set_mad_demux_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x6];
+ u8 demux_mode[0x2];
+ u8 reserved_4[0x18];
+};
+
+struct mlx5_ifc_set_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x13];
+ u8 vlan_valid[0x1];
+ u8 vlan[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+ u8 reserved_6[0xc0];
+};
+
+struct mlx5_ifc_set_issi_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_issi_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 current_issi[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
};
struct mlx5_ifc_set_hca_cap_in_bits {
@@ -313,10 +2764,653 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 reserved_2[0x40];
- struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
+ union mlx5_ifc_hca_cap_union_bits capability;
};
-struct mlx5_ifc_query_hca_cap_in_bits {
+struct mlx5_ifc_set_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+
+ struct mlx5_ifc_flow_context_bits flow_context;
+};
+
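+/*
+ * Editor's sketch (illustrative, not part of this patch): programming a
+ * single flow table entry with the layouts above.  table_type, table_id
+ * and index stand in for caller-supplied values:
+ *
+ *	u32 in[MLX5_ST_SZ_DW(set_fte_in)] = {0};
+ *	void *ctx = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ *
+ *	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+ *	MLX5_SET(set_fte_in, in, table_type, table_type);
+ *	MLX5_SET(set_fte_in, in, table_id, table_id);
+ *	MLX5_SET(set_fte_in, in, flow_index, index);
+ *	MLX5_SET(flow_context, ctx, action,
+ *		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ */
+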
+struct mlx5_ifc_rts2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rts2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_rtr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rtr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_rst2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rst2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
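+/*
+ * Editor's sketch (illustrative): the QP transition commands
+ * (rst2init/init2rtr/rtr2rts/...) all share this shape -- a qpn plus an
+ * opt_param_mask and a full qpc.  A minimal RESET->INIT request, with
+ * qpn and pdn as placeholders, might be built as:
+ *
+ *	u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {0};
+ *	void *qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
+ *
+ *	MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ *	MLX5_SET(rst2init_qp_in, in, qpn, qpn);
+ *	MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
+ *	MLX5_SET(qpc, qpc, pd, pdn);
+ */
+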
+struct mlx5_ifc_query_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+enum {
+ MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0,
+ MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1,
+};
+
+struct mlx5_ifc_query_vport_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x18];
+ u8 admin_state[0x4];
+ u8 state[0x4];
+};
+
+enum {
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
+};
+
+struct mlx5_ifc_query_vport_state_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_vport_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_traffic_counter_bits received_errors;
+
+ struct mlx5_ifc_traffic_counter_bits transmit_errors;
+
+ struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
+
+ u8 reserved_2[0xa00];
+};
+
+enum {
+ MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0,
+};
+
+struct mlx5_ifc_query_vport_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x60];
+
+ u8 clear[0x1];
+ u8 reserved_4[0x1f];
+
+ u8 reserved_5[0x20];
+};
+
+struct mlx5_ifc_query_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_tirc_bits tir_context;
+};
+
+struct mlx5_ifc_query_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_srqc_bits srq_context_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_sqc_bits sq_context;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 resd_lkey[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_query_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rqc_bits rq_context;
+};
+
+struct mlx5_ifc_query_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_roce_address_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_query_roce_address_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 roce_address_index[0x10];
+ u8 reserved_2[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rmpc_bits rmp_context;
+};
+
+struct mlx5_ifc_query_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_2[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_3[0x80];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 rx_write_requests[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 rx_read_requests[0x20];
+
+ u8 reserved_3[0x20];
+
+ u8 rx_atomic_requests[0x20];
+
+ u8 reserved_4[0x20];
+
+ u8 rx_dct_connect[0x20];
+
+ u8 reserved_5[0x20];
+
+ u8 out_of_buffer[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 out_of_sequence[0x20];
+
+ u8 reserved_7[0x620];
+};
+
+struct mlx5_ifc_query_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x80];
+
+ u8 clear[0x1];
+ u8 reserved_3[0x1f];
+
+ u8 reserved_4[0x18];
+ u8 counter_set_id[0x8];
+};
+
+struct mlx5_ifc_query_pages_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 function_id[0x10];
+
+ u8 num_pages[0x20];
+};
+
+enum {
+ MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1,
+ MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2,
+ MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3,
+};
+
+struct mlx5_ifc_query_pages_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x5];
+ u8 allowed_list_type[0x3];
+ u8 reserved_4[0x18];
+};
+
+struct mlx5_ifc_query_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 bsf0_klm0_pas_mtt0_1[16][0x8];
+
+ u8 bsf1_klm1_pas_mtt2_3[16][0x8];
+};
+
+struct mlx5_ifc_query_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 mkey_index[0x18];
+
+ u8 pg_access[0x1];
+ u8 reserved_3[0x1f];
+};
+
+struct mlx5_ifc_query_mad_demux_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 mad_demux_parameters_block[0x20];
+};
+
+struct mlx5_ifc_query_mad_demux_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -326,6 +3420,146 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 reserved_2[0x40];
};
+struct mlx5_ifc_query_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xa0];
+
+ u8 reserved_2[0x13];
+ u8 vlan_valid[0x1];
+ u8 vlan[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+ u8 reserved_3[0xc0];
+};
+
+struct mlx5_ifc_query_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x140];
+};
+
+struct mlx5_ifc_query_issi_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 current_issi[0x10];
+
+ u8 reserved_2[0xa0];
+
+ u8 supported_issi_reserved[76][0x8];
+ u8 supported_issi_dw0[0x20];
+};
+
+struct mlx5_ifc_query_issi_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_pkey_bits pkey[0];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x10];
+ u8 pkey_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 gids_num[0x10];
+ u8 reserved_2[0x10];
+
+ struct mlx5_ifc_array128_auto_bits gid[0];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x10];
+ u8 gid_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_query_hca_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
struct mlx5_ifc_query_hca_cap_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
@@ -334,16 +3568,3216 @@ struct mlx5_ifc_query_hca_cap_out_bits {
u8 reserved_1[0x40];
- u8 capability_struct[256][0x8];
+ union mlx5_ifc_hca_cap_union_bits capability;
};
-struct mlx5_ifc_set_hca_cap_out_bits {
+struct mlx5_ifc_query_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
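+/*
+ * Editor's sketch (illustrative): op_mod selects which member of
+ * union mlx5_ifc_hca_cap_union_bits the QUERY_HCA_CAP output carries,
+ * with the low bit picking maximum vs. currently-enabled values:
+ *
+ *	u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+ *
+ *	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ *	MLX5_SET(query_hca_cap_in, in, op_mod,
+ *		 (cap_type << 1) | HCA_CAP_OPMOD_GET_CUR);
+ */
+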
+struct mlx5_ifc_query_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x80];
+
+ u8 reserved_2[0x8];
+ u8 level[0x8];
+ u8 reserved_3[0x8];
+ u8 log_size[0x8];
+
+ u8 reserved_4[0x120];
+};
+
+struct mlx5_ifc_query_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x140];
+};
+
+struct mlx5_ifc_query_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x1c0];
+
+ struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_query_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+};
+
+enum {
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+};
+
+struct mlx5_ifc_query_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xa0];
+
+ u8 start_flow_index[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 end_flow_index[0x20];
+
+ u8 reserved_3[0xa0];
+
+ u8 reserved_4[0x18];
+ u8 match_criteria_enable[0x8];
+
+ struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+ u8 reserved_5[0xe00];
+};
+
+struct mlx5_ifc_query_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 group_id[0x20];
+
+ u8 reserved_5[0x120];
+};
+
+struct mlx5_ifc_query_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_eqc_bits eq_context_entry;
+
+ u8 reserved_2[0x40];
+
+ u8 event_bitmask[0x40];
+
+ u8 reserved_3[0x580];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_dctc_bits dct_context_entry;
+
+ u8 reserved_2[0x180];
+};
+
+struct mlx5_ifc_query_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cq_out_bits {
u8 status[0x8];
u8 reserved_0[0x18];
u8 syndrome[0x20];
u8 reserved_1[0x40];
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_status_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 enable[0x1];
+ u8 tag_enable[0x1];
+ u8 reserved_2[0x1e];
+};
+
+struct mlx5_ifc_query_cong_status_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 priority[0x4];
+ u8 cong_protocol[0x4];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_statistics_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 cur_flows[0x20];
+
+ u8 sum_flows[0x20];
+
+ u8 cnp_ignored_high[0x20];
+
+ u8 cnp_ignored_low[0x20];
+
+ u8 cnp_handled_high[0x20];
+
+ u8 cnp_handled_low[0x20];
+
+ u8 reserved_2[0x100];
+
+ u8 time_stamp_high[0x20];
+
+ u8 time_stamp_low[0x20];
+
+ u8 accumulators_period[0x20];
+
+ u8 ecn_marked_roce_packets_high[0x20];
+
+ u8 ecn_marked_roce_packets_low[0x20];
+
+ u8 cnps_sent_high[0x20];
+
+ u8 cnps_sent_low[0x20];
+
+ u8 reserved_3[0x560];
+};
+
+struct mlx5_ifc_query_cong_statistics_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 clear[0x1];
+ u8 reserved_2[0x1f];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_query_cong_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 cong_protocol[0x4];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_adapter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
+};
+
+struct mlx5_ifc_query_adapter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_qp_2err_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2err_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_page_fault_resume_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_page_fault_resume_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 error[0x1];
+ u8 reserved_2[0x4];
+ u8 rdma[0x1];
+ u8 read_write[0x1];
+ u8 req_res[0x1];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_nop_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_nop_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x18];
+ u8 admin_state[0x4];
+ u8 reserved_4[0x4];
+};
+
+struct mlx5_ifc_modify_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_modify_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 sq_state[0x4];
+ u8 reserved_2[0x4];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rqtc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 rq_state[0x4];
+ u8 reserved_2[0x4];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rmp_bitmask_bits {
+ u8 reserved_0[0x20];
+
+ u8 reserved_1[0x1f];
+ u8 lwm[0x1];
+};
+
+struct mlx5_ifc_modify_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 rmp_state[0x4];
+ u8 reserved_2[0x4];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_rmp_bitmask_bits bitmask;
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_modify_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_nic_vport_field_select_bits {
+ u8 reserved_0[0x1c];
+ u8 permanent_address[0x1];
+ u8 addresses_list[0x1];
+ u8 roce_en[0x1];
+ u8 reserved_1[0x1];
+};
+
+struct mlx5_ifc_modify_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
+
+ u8 reserved_3[0x780];
+
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_modify_hca_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_hca_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_modify_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0,
+ MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1,
+};
+
+struct mlx5_ifc_modify_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 priority[0x4];
+ u8 cong_protocol[0x4];
+
+ u8 enable[0x1];
+ u8 tag_enable[0x1];
+ u8 reserved_3[0x1e];
+};
+
+struct mlx5_ifc_modify_cong_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 cong_protocol[0x4];
+
+ union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
+
+ u8 reserved_3[0x80];
+
+ union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_manage_pages_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 output_num_entries[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 pas[0][0x40];
+};
+
+enum {
+ MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL = 0x0,
+ MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS = 0x1,
+ MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES = 0x2,
+};
+
+struct mlx5_ifc_manage_pages_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 input_num_entries[0x20];
+
+ u8 pas[0][0x40];
+};
+
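+/*
+ * Editor's note (illustrative): MANAGE_PAGES is the driver/firmware page
+ * hand-off.  After QUERY_PAGES reports a deficit, the driver passes
+ * physical addresses in pas[] with op_mod ALLOCATION_SUCCESS (or reports
+ * ALLOCATION_FAIL if it cannot satisfy the request); with
+ * HCA_RETURN_PAGES the firmware hands pages back through the output
+ * pas[] and output_num_entries.
+ */
+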
+struct mlx5_ifc_mad_ifc_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 response_mad_packet[256][0x8];
+};
+
+struct mlx5_ifc_mad_ifc_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 remote_lid[0x10];
+ u8 reserved_2[0x8];
+ u8 port[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 mad[256][0x8];
+};
+
+struct mlx5_ifc_init_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_init2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 packet_headers_log[128][0x8];
+
+ u8 packet_syndrome[64][0x8];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_gen_eqe_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 eqe[64][0x8];
+};
+
+struct mlx5_ifc_gen_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_enable_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_enable_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_drain_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_drain_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_disable_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_disable_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_detach_from_mcg_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_detach_from_mcg_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_psv_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_psv_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 psvn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x140];
+};
+
+struct mlx5_ifc_destroy_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 group_id[0x20];
+
+ u8 reserved_5[0x120];
+};
+
+struct mlx5_ifc_destroy_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x140];
+};
+
+struct mlx5_ifc_delete_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_6[0xe0];
+};
+
+struct mlx5_ifc_dealloc_xrcd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_xrcd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_uar_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_uar_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 uar[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 counter_set_id[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_pd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_pd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
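
The trailing pas[0][0x40] member is this format's spelling of a
variable-length tail: a run of 64-bit physical page addresses appended after
the fixed part of the command, so input length must be computed per call
rather than taken from sizeof. A sketch of the sizing arithmetic (helper name
is hypothetical; MLX5_ST_SZ_BYTES() is the byte-sized counterpart of
MLX5_ST_SZ_DW()):

        /* Sketch: each pas[] entry is 0x40 bits wide, i.e. one u64 per
         * buffer page.
         */
        static inline int sketch_create_xrc_srq_inlen(int npas)
        {
                return MLX5_ST_SZ_BYTES(create_xrc_srq_in) +
                       npas * sizeof(u64);
        }
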
+
+struct mlx5_ifc_create_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_create_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_srqc_bits srq_context_entry;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_create_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_4[0x80];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_psv_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 reserved_2[0x8];
+ u8 psv0_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 psv1_index[0x18];
+
+ u8 reserved_4[0x8];
+ u8 psv2_index[0x18];
+
+ u8 reserved_5[0x8];
+ u8 psv3_index[0x18];
+};
+
+struct mlx5_ifc_create_psv_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 num_psv[0x4];
+ u8 reserved_2[0x4];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 pg_access[0x1];
+ u8 reserved_3[0x1f];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_4[0x80];
+
+ u8 translations_octword_actual_size[0x20];
+
+ u8 reserved_5[0x560];
+
+ u8 klm_pas_mtt[0][0x20];
+};
+
+struct mlx5_ifc_create_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x8];
+ u8 level[0x8];
+ u8 reserved_6[0x8];
+ u8 log_size[0x8];
+
+ u8 reserved_7[0x120];
+};
+
+struct mlx5_ifc_create_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 group_id[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+enum {
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+};
+
+struct mlx5_ifc_create_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 table_type[0x8];
+ u8 reserved_3[0x18];
+
+ u8 reserved_4[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_5[0x20];
+
+ u8 start_flow_index[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 end_flow_index[0x20];
+
+ u8 reserved_7[0xa0];
+
+ u8 reserved_8[0x18];
+ u8 match_criteria_enable[0x8];
+
+ struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+ u8 reserved_9[0xe00];
+};
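
A flow group reserves a contiguous index range inside a flow table
(start_flow_index through end_flow_index) whose entries all share one match
mask, and match_criteria_enable selects which header classes from the enum
above take part in matching. A sketch of creating a single-entry group that
matches on outer headers only (the opcode constant name is an assumption;
the layout is large because of the embedded fte_match_param, hence the
kzalloc):

        static int sketch_create_flow_group(struct mlx5_core_dev *dev,
                                            u8 table_type, u32 table_id,
                                            u32 *group_id)
        {
                u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
                int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
                u32 *in;
                int err;

                in = kzalloc(inlen, GFP_KERNEL);
                if (!in)
                        return -ENOMEM;

                MLX5_SET(create_flow_group_in, in, opcode,
                         MLX5_CMD_OP_CREATE_FLOW_GROUP);
                MLX5_SET(create_flow_group_in, in, table_type, table_type);
                MLX5_SET(create_flow_group_in, in, table_id, table_id);
                MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
                MLX5_SET(create_flow_group_in, in, end_flow_index, 0);
                MLX5_SET(create_flow_group_in, in, match_criteria_enable,
                         MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS);

                err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
                if (!err)
                        *group_id = MLX5_GET(create_flow_group_out, out,
                                             group_id);
                kfree(in);
                return err;
        }
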
+
+struct mlx5_ifc_create_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_eqc_bits eq_context_entry;
+
+ u8 reserved_3[0x40];
+
+ u8 event_bitmask[0x40];
+
+ u8 reserved_4[0x580];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_dctc_bits dct_context_entry;
+
+ u8 reserved_3[0x180];
+};
+
+struct mlx5_ifc_create_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_config_int_moderation_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x4];
+ u8 min_delay[0xc];
+ u8 int_vector[0x10];
+
+ u8 reserved_2[0x20];
+};
+
+enum {
+ MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0,
+ MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1,
+};
+
+struct mlx5_ifc_config_int_moderation_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x4];
+ u8 min_delay[0xc];
+ u8 int_vector[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_attach_to_mcg_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_attach_to_mcg_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_arm_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1,
+};
+
+struct mlx5_ifc_arm_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x10];
+ u8 lwm[0x10];
+};
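
ARM_XRC_SRQ re-arms the limit event of an XRC SRQ: lwm is the limit water
mark, and once the number of unconsumed receive WQEs drops below it the
device raises an SRQ limit event; the event is one-shot, so consumers re-arm
after handling it. A sketch of the arming command (the opcode constant name
is an assumption):

        static int sketch_arm_xrc_srq(struct mlx5_core_dev *dev, u32 xrc_srqn,
                                      u16 lwm)
        {
                u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
                u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

                MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
                MLX5_SET(arm_xrc_srq_in, in, op_mod,
                         MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
                MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xrc_srqn);
                MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);

                return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        }
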
+
+struct mlx5_ifc_arm_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1,
+};
+
+struct mlx5_ifc_arm_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srq_number[0x18];
+
+ u8 reserved_3[0x10];
+ u8 lwm[0x10];
+};
+
+struct mlx5_ifc_arm_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_arm_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dct_number[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_uar_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 uar[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_uar_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x18];
+ u8 counter_set_id[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_pd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_pd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_access_register_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 register_data[0][0x20];
+};
+
+enum {
+ MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0,
+ MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1,
+};
+
+struct mlx5_ifc_access_register_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 register_id[0x10];
+
+ u8 argument[0x20];
+
+ u8 register_data[0][0x20];
+};
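
ACCESS_REGISTER is the transport for the port (P*) and serdes (SL*)
registers defined below: register_id selects the register, op_mod chooses
write or read per the enum above, and register_data carries the register
layout itself. The mlx5 core wraps this in mlx5_core_access_reg(); a sketch
of reading the operational MTU through it using the pmtu_reg layout defined
further down (the MLX5_REG_PMTU register id is assumed to be defined in the
driver headers):

        static int sketch_query_oper_mtu(struct mlx5_core_dev *dev,
                                         u16 *oper_mtu)
        {
                u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
                u32 out[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
                int err;

                MLX5_SET(pmtu_reg, in, local_port, 1);
                err = mlx5_core_access_reg(dev, in, sizeof(in), out,
                                           sizeof(out), MLX5_REG_PMTU,
                                           0, 0 /* read */);
                if (!err)
                        *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
                return err;
        }
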
+
+struct mlx5_ifc_sltp_reg_bits {
+ u8 status[0x4];
+ u8 version[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x2];
+ u8 lane[0x4];
+ u8 reserved_1[0x8];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x7];
+ u8 polarity[0x1];
+ u8 ob_tap0[0x8];
+ u8 ob_tap1[0x8];
+ u8 ob_tap2[0x8];
+
+ u8 reserved_4[0xc];
+ u8 ob_preemp_mode[0x4];
+ u8 ob_reg[0x8];
+ u8 ob_bias[0x8];
+
+ u8 reserved_5[0x20];
+};
+
+struct mlx5_ifc_slrg_reg_bits {
+ u8 status[0x4];
+ u8 version[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x2];
+ u8 lane[0x4];
+ u8 reserved_1[0x8];
+
+ u8 time_to_link_up[0x10];
+ u8 reserved_2[0xc];
+ u8 grade_lane_speed[0x4];
+
+ u8 grade_version[0x8];
+ u8 grade[0x18];
+
+ u8 reserved_3[0x4];
+ u8 height_grade_type[0x4];
+ u8 height_grade[0x18];
+
+ u8 height_dz[0x10];
+ u8 height_dv[0x10];
+
+ u8 reserved_4[0x10];
+ u8 height_sigma[0x10];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x4];
+ u8 phase_grade_type[0x4];
+ u8 phase_grade[0x18];
+
+ u8 reserved_7[0x8];
+ u8 phase_eo_pos[0x8];
+ u8 reserved_8[0x8];
+ u8 phase_eo_neg[0x8];
+
+ u8 ffe_set_tested[0x10];
+ u8 test_errors_per_lane[0x10];
+};
+
+struct mlx5_ifc_pvlc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 vl_hw_cap[0x4];
+
+ u8 reserved_3[0x1c];
+ u8 vl_admin[0x4];
+
+ u8 reserved_4[0x1c];
+ u8 vl_operational[0x4];
+};
+
+struct mlx5_ifc_pude_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 reserved_0[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_1[0x4];
+ u8 oper_status[0x4];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_ptys_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0xd];
+ u8 proto_mask[0x3];
+
+ u8 reserved_2[0x40];
+
+ u8 eth_proto_capability[0x20];
+
+ u8 ib_link_width_capability[0x10];
+ u8 ib_proto_capability[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 eth_proto_admin[0x20];
+
+ u8 ib_link_width_admin[0x10];
+ u8 ib_proto_admin[0x10];
+
+ u8 reserved_4[0x20];
+
+ u8 eth_proto_oper[0x20];
+
+ u8 ib_link_width_oper[0x10];
+ u8 ib_proto_oper[0x10];
+
+ u8 reserved_5[0x20];
+
+ u8 eth_proto_lp_advertise[0x20];
+
+ u8 reserved_6[0x60];
+};
+
+struct mlx5_ifc_ptas_reg_bits {
+ u8 reserved_0[0x20];
+
+ u8 algorithm_options[0x10];
+ u8 reserved_1[0x4];
+ u8 repetitions_mode[0x4];
+ u8 num_of_repetitions[0x8];
+
+ u8 grade_version[0x8];
+ u8 height_grade_type[0x4];
+ u8 phase_grade_type[0x4];
+ u8 height_grade_weight[0x8];
+ u8 phase_grade_weight[0x8];
+
+ u8 gisim_measure_bits[0x10];
+ u8 adaptive_tap_measure_bits[0x10];
+
+ u8 ber_bath_high_error_threshold[0x10];
+ u8 ber_bath_mid_error_threshold[0x10];
+
+ u8 ber_bath_low_error_threshold[0x10];
+ u8 one_ratio_high_threshold[0x10];
+
+ u8 one_ratio_high_mid_threshold[0x10];
+ u8 one_ratio_low_mid_threshold[0x10];
+
+ u8 one_ratio_low_threshold[0x10];
+ u8 ndeo_error_threshold[0x10];
+
+ u8 mixer_offset_step_size[0x10];
+ u8 reserved_2[0x8];
+ u8 mix90_phase_for_voltage_bath[0x8];
+
+ u8 mixer_offset_start[0x10];
+ u8 mixer_offset_end[0x10];
+
+ u8 reserved_3[0x15];
+ u8 ber_test_time[0xb];
+};
+
+struct mlx5_ifc_pspa_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 sub_port[0x8];
+ u8 reserved_0[0x8];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_pqdr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x5];
+ u8 prio[0x3];
+ u8 reserved_2[0x6];
+ u8 mode[0x2];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x10];
+ u8 min_threshold[0x10];
+
+ u8 reserved_5[0x10];
+ u8 max_threshold[0x10];
+
+ u8 reserved_6[0x10];
+ u8 mark_probability_denominator[0x10];
+
+ u8 reserved_7[0x60];
+};
+
+struct mlx5_ifc_ppsc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x1c];
+ u8 wrps_admin[0x4];
+
+ u8 reserved_4[0x1c];
+ u8 wrps_status[0x4];
+
+ u8 reserved_5[0x8];
+ u8 up_threshold[0x8];
+ u8 reserved_6[0x8];
+ u8 down_threshold[0x8];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x1c];
+ u8 srps_admin[0x4];
+
+ u8 reserved_9[0x1c];
+ u8 srps_status[0x4];
+
+ u8 reserved_10[0x40];
+};
+
+struct mlx5_ifc_pplr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x8];
+ u8 lb_cap[0x8];
+ u8 reserved_3[0x8];
+ u8 lb_en[0x8];
+};
+
+struct mlx5_ifc_pplm_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 port_profile_mode[0x8];
+ u8 static_port_profile[0x8];
+ u8 active_port_profile[0x8];
+ u8 reserved_3[0x8];
+
+ u8 retransmission_active[0x8];
+ u8 fec_mode_active[0x18];
+
+ u8 reserved_4[0x20];
+};
+
+struct mlx5_ifc_ppcnt_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x8];
+ u8 grp[0x6];
+
+ u8 clr[0x1];
+ u8 reserved_1[0x1c];
+ u8 prio_tc[0x3];
+
+ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
+struct mlx5_ifc_ppad_reg_bits {
+ u8 reserved_0[0x3];
+ u8 single_mac[0x1];
+ u8 reserved_1[0x4];
+ u8 local_port[0x8];
+ u8 mac_47_32[0x10];
+
+ u8 mac_31_0[0x20];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_pmtu_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 max_mtu[0x10];
+ u8 reserved_2[0x10];
+
+ u8 admin_mtu[0x10];
+ u8 reserved_3[0x10];
+
+ u8 oper_mtu[0x10];
+ u8 reserved_4[0x10];
+};
+
+struct mlx5_ifc_pmpr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x18];
+ u8 attenuation_5g[0x8];
+
+ u8 reserved_3[0x18];
+ u8 attenuation_7g[0x8];
+
+ u8 reserved_4[0x18];
+ u8 attenuation_12g[0x8];
+};
+
+struct mlx5_ifc_pmpe_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0xc];
+ u8 module_status[0x4];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_pmpc_reg_bits {
+ u8 module_state_updated[32][0x8];
+};
+
+struct mlx5_ifc_pmlpn_reg_bits {
+ u8 reserved_0[0x4];
+ u8 mlpn_status[0x4];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 e[0x1];
+ u8 reserved_2[0x1f];
+};
+
+struct mlx5_ifc_pmlp_reg_bits {
+ u8 rxtx[0x1];
+ u8 reserved_0[0x7];
+ u8 local_port[0x8];
+ u8 reserved_1[0x8];
+ u8 width[0x8];
+
+ u8 lane0_module_mapping[0x20];
+
+ u8 lane1_module_mapping[0x20];
+
+ u8 lane2_module_mapping[0x20];
+
+ u8 lane3_module_mapping[0x20];
+
+ u8 reserved_2[0x160];
+};
+
+struct mlx5_ifc_pmaos_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_2[0x4];
+ u8 oper_status[0x4];
+
+ u8 ase[0x1];
+ u8 ee[0x1];
+ u8 reserved_3[0x1c];
+ u8 e[0x2];
+
+ u8 reserved_4[0x40];
+};
+
+struct mlx5_ifc_plpc_reg_bits {
+ u8 reserved_0[0x4];
+ u8 profile_id[0xc];
+ u8 reserved_1[0x4];
+ u8 proto_mask[0x4];
+ u8 reserved_2[0x8];
+
+ u8 reserved_3[0x10];
+ u8 lane_speed[0x10];
+
+ u8 reserved_4[0x17];
+ u8 lpbf[0x1];
+ u8 fec_mode_policy[0x8];
+
+ u8 retransmission_capability[0x8];
+ u8 fec_mode_capability[0x18];
+
+ u8 retransmission_support_admin[0x8];
+ u8 fec_mode_support_admin[0x18];
+
+ u8 retransmission_request_admin[0x8];
+ u8 fec_mode_request_admin[0x18];
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_plib_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x8];
+ u8 ib_port[0x8];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_plbf_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0xd];
+ u8 lbf_mode[0x3];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_pipg_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 dic[0x1];
+ u8 reserved_2[0x19];
+ u8 ipg[0x4];
+ u8 reserved_3[0x2];
+};
+
+struct mlx5_ifc_pifr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0xe0];
+
+ u8 port_filter[8][0x20];
+
+ u8 port_filter_update_en[8][0x20];
+};
+
+struct mlx5_ifc_pfcc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 ppan[0x4];
+ u8 reserved_2[0x4];
+ u8 prio_mask_tx[0x8];
+ u8 reserved_3[0x8];
+ u8 prio_mask_rx[0x8];
+
+ u8 pptx[0x1];
+ u8 aptx[0x1];
+ u8 reserved_4[0x6];
+ u8 pfctx[0x8];
+ u8 reserved_5[0x10];
+
+ u8 pprx[0x1];
+ u8 aprx[0x1];
+ u8 reserved_6[0x6];
+ u8 pfcrx[0x8];
+ u8 reserved_7[0x10];
+
+ u8 reserved_8[0x80];
+};
+
+struct mlx5_ifc_pelc_reg_bits {
+ u8 op[0x4];
+ u8 reserved_0[0x4];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 op_admin[0x8];
+ u8 op_capability[0x8];
+ u8 op_request[0x8];
+ u8 op_active[0x8];
+
+ u8 admin[0x40];
+
+ u8 capability[0x40];
+
+ u8 request[0x40];
+
+ u8 active[0x40];
+
+ u8 reserved_2[0x80];
+};
+
+struct mlx5_ifc_peir_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0xc];
+ u8 error_count[0x4];
+ u8 reserved_3[0x10];
+
+ u8 reserved_4[0xc];
+ u8 lane[0x4];
+ u8 reserved_5[0x8];
+ u8 error_type[0x8];
+};
+
+struct mlx5_ifc_pcap_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 port_capability_mask[4][0x20];
+};
+
+struct mlx5_ifc_paos_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 reserved_0[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_1[0x4];
+ u8 oper_status[0x4];
+
+ u8 ase[0x1];
+ u8 ee[0x1];
+ u8 reserved_2[0x1c];
+ u8 e[0x2];
+
+ u8 reserved_3[0x40];
+};
+
+struct mlx5_ifc_pamp_reg_bits {
+ u8 reserved_0[0x8];
+ u8 opamp_group[0x8];
+ u8 reserved_1[0xc];
+ u8 opamp_group_type[0x4];
+
+ u8 start_index[0x10];
+ u8 reserved_2[0x4];
+ u8 num_of_indices[0xc];
+
+ u8 index_data[18][0x10];
+};
+
+struct mlx5_ifc_lane_2_module_mapping_bits {
+ u8 reserved_0[0x6];
+ u8 rx_lane[0x2];
+ u8 reserved_1[0x6];
+ u8 tx_lane[0x2];
+ u8 reserved_2[0x8];
+ u8 module[0x8];
+};
+
+struct mlx5_ifc_bufferx_reg_bits {
+ u8 reserved_0[0x6];
+ u8 lossy[0x1];
+ u8 epsb[0x1];
+ u8 reserved_1[0xc];
+ u8 size[0xc];
+
+ u8 xoff_threshold[0x10];
+ u8 xon_threshold[0x10];
+};
+
+struct mlx5_ifc_set_node_in_bits {
+ u8 node_description[64][0x8];
+};
+
+struct mlx5_ifc_register_power_settings_bits {
+ u8 reserved_0[0x18];
+ u8 power_settings_level[0x8];
+
+ u8 reserved_1[0x60];
+};
+
+struct mlx5_ifc_register_host_endianness_bits {
+ u8 he[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x60];
+};
+
+struct mlx5_ifc_umr_pointer_desc_argument_bits {
+ u8 reserved_0[0x20];
+
+ u8 mkey[0x20];
+
+ u8 addressh_63_32[0x20];
+
+ u8 addressl_31_0[0x20];
+};
+
+struct mlx5_ifc_ud_adrs_vector_bits {
+ u8 dc_key[0x40];
+
+ u8 ext[0x1];
+ u8 reserved_0[0x7];
+ u8 destination_qp_dct[0x18];
+
+ u8 static_rate[0x4];
+ u8 sl_eth_prio[0x4];
+ u8 fl[0x1];
+ u8 mlid[0x7];
+ u8 rlid_udp_sport[0x10];
+
+ u8 reserved_1[0x20];
+
+ u8 rmac_47_16[0x20];
+
+ u8 rmac_15_0[0x10];
+ u8 tclass[0x8];
+ u8 hop_limit[0x8];
+
+ u8 reserved_2[0x1];
+ u8 grh[0x1];
+ u8 reserved_3[0x2];
+ u8 src_addr_index[0x8];
+ u8 flow_label[0x14];
+
+ u8 rgid_rip[16][0x8];
+};
+
+struct mlx5_ifc_pages_req_event_bits {
+ u8 reserved_0[0x10];
+ u8 function_id[0x10];
+
+ u8 num_pages[0x20];
+
+ u8 reserved_1[0xa0];
+};
+
+struct mlx5_ifc_eqe_bits {
+ u8 reserved_0[0x8];
+ u8 event_type[0x8];
+ u8 reserved_1[0x8];
+ u8 event_sub_type[0x8];
+
+ u8 reserved_2[0xe0];
+
+ union mlx5_ifc_event_auto_bits event_data;
+
+ u8 reserved_3[0x10];
+ u8 signature[0x8];
+ u8 reserved_4[0x7];
+ u8 owner[0x1];
+};
+
+enum {
+ MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7,
+};
+
+struct mlx5_ifc_cmd_queue_entry_bits {
+ u8 type[0x8];
+ u8 reserved_0[0x18];
+
+ u8 input_length[0x20];
+
+ u8 input_mailbox_pointer_63_32[0x20];
+
+ u8 input_mailbox_pointer_31_9[0x17];
+ u8 reserved_1[0x9];
+
+ u8 command_input_inline_data[16][0x8];
+
+ u8 command_output_inline_data[16][0x8];
+
+ u8 output_mailbox_pointer_63_32[0x20];
+
+ u8 output_mailbox_pointer_31_9[0x17];
+ u8 reserved_2[0x9];
+
+ u8 output_length[0x20];
+
+ u8 token[0x8];
+ u8 signature[0x8];
+ u8 reserved_3[0x8];
+ u8 status[0x7];
+ u8 ownership[0x1];
+};
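
This entry mirrors the struct mlx5_cmd_layout the command interface already
uses: inputs and outputs of up to 16 bytes travel inline in the entry,
larger ones through chains of the mlx5_ifc_cmd_if_box_bits mailboxes below,
and the ownership bit in the final byte is what hardware flips when it
finishes the command. A sketch of the ownership test under that reading
(polarity as in the existing driver, where the bit is set while hardware
owns the entry):

        /* Sketch only: real code polls the cmd layout or waits for the
         * command-completion EQE rather than dereferencing it ad hoc.
         */
        static bool sketch_cmd_entry_hw_owned(void *ent)
        {
                return MLX5_GET(cmd_queue_entry, ent, ownership);
        }
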
+
+struct mlx5_ifc_cmd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 command_output[0x20];
+};
+
+struct mlx5_ifc_cmd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 command[0][0x20];
+};
+
+struct mlx5_ifc_cmd_if_box_bits {
+ u8 mailbox_data[512][0x8];
+
+ u8 reserved_0[0x180];
+
+ u8 next_pointer_63_32[0x20];
+
+ u8 next_pointer_31_10[0x16];
+ u8 reserved_1[0xa];
+
+ u8 block_number[0x20];
+
+ u8 reserved_2[0x8];
+ u8 token[0x8];
+ u8 ctrl_signature[0x8];
+ u8 signature[0x8];
+};
+
+struct mlx5_ifc_mtt_bits {
+ u8 ptag_63_32[0x20];
+
+ u8 ptag_31_8[0x18];
+ u8 reserved_0[0x6];
+ u8 wr_en[0x1];
+ u8 rd_en[0x1];
+};
+
+enum {
+ MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
+};
+
+enum {
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2,
+};
+
+enum {
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR = 0x1,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC = 0x7,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR = 0x8,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR = 0x9,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR = 0xa,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR = 0xb,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN = 0xc,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR = 0xd,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
+};
+
+struct mlx5_ifc_initial_seg_bits {
+ u8 fw_rev_minor[0x10];
+ u8 fw_rev_major[0x10];
+
+ u8 cmd_interface_rev[0x10];
+ u8 fw_rev_subminor[0x10];
+
+ u8 reserved_0[0x40];
+
+ u8 cmdq_phy_addr_63_32[0x20];
+
+ u8 cmdq_phy_addr_31_12[0x14];
+ u8 reserved_1[0x2];
+ u8 nic_interface[0x2];
+ u8 log_cmdq_size[0x4];
+ u8 log_cmdq_stride[0x4];
+
+ u8 command_doorbell_vector[0x20];
+
+ u8 reserved_2[0xf00];
+
+ u8 initializing[0x1];
+ u8 reserved_3[0x4];
+ u8 nic_interface_supported[0x3];
+ u8 reserved_4[0x18];
+
+ struct mlx5_ifc_health_buffer_bits health_buffer;
+
+ u8 no_dram_nic_offset[0x20];
+
+ u8 reserved_5[0x6e40];
+
+ u8 reserved_6[0x1f];
+ u8 clear_int[0x1];
+
+ u8 health_syndrome[0x8];
+ u8 health_counter[0x18];
+
+ u8 reserved_7[0x17fc0];
+};
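
The initialization segment is the first thing the driver maps from the
device: firmware revision words, the command-queue address and sizing the
driver must program, and, further in, the health buffer whose
health_syndrome values are decoded by the enum above. Firmware holds the
initializing bit high until the command interface is ready, so the driver
spins on it before doing anything else. A sketch of the wait, assuming a
pointer to the dword holding the bit and a hypothetical 10s timeout:

        static int sketch_wait_fw_init(void __iomem *initializing_dword)
        {
                unsigned long end = jiffies + msecs_to_jiffies(10000);

                /* initializing is the most-significant bit of its dword */
                while (ioread32be(initializing_dword) & BIT(31)) {
                        if (time_after(jiffies, end))
                                return -EBUSY;
                        msleep(20);
                }
                return 0;
        }
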
+
+union mlx5_ifc_ports_control_registers_document_bits {
+ struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
+ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+ struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
+ struct mlx5_ifc_pamp_reg_bits pamp_reg;
+ struct mlx5_ifc_paos_reg_bits paos_reg;
+ struct mlx5_ifc_pcap_reg_bits pcap_reg;
+ struct mlx5_ifc_peir_reg_bits peir_reg;
+ struct mlx5_ifc_pelc_reg_bits pelc_reg;
+ struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
+ struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ struct mlx5_ifc_pifr_reg_bits pifr_reg;
+ struct mlx5_ifc_pipg_reg_bits pipg_reg;
+ struct mlx5_ifc_plbf_reg_bits plbf_reg;
+ struct mlx5_ifc_plib_reg_bits plib_reg;
+ struct mlx5_ifc_plpc_reg_bits plpc_reg;
+ struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
+ struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
+ struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
+ struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
+ struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
+ struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
+ struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
+ struct mlx5_ifc_ppad_reg_bits ppad_reg;
+ struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+ struct mlx5_ifc_pplm_reg_bits pplm_reg;
+ struct mlx5_ifc_pplr_reg_bits pplr_reg;
+ struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
+ struct mlx5_ifc_pqdr_reg_bits pqdr_reg;
+ struct mlx5_ifc_pspa_reg_bits pspa_reg;
+ struct mlx5_ifc_ptas_reg_bits ptas_reg;
+ struct mlx5_ifc_ptys_reg_bits ptys_reg;
+ struct mlx5_ifc_pude_reg_bits pude_reg;
+ struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
+ struct mlx5_ifc_slrg_reg_bits slrg_reg;
+ struct mlx5_ifc_sltp_reg_bits sltp_reg;
+ u8 reserved_0[0x60e0];
+};
+
+union mlx5_ifc_debug_enhancements_document_bits {
+ struct mlx5_ifc_health_buffer_bits health_buffer;
+ u8 reserved_0[0x200];
+};
+
+union mlx5_ifc_uplink_pci_interface_document_bits {
+ struct mlx5_ifc_initial_seg_bits initial_seg;
+ u8 reserved_0[0x20060];
};
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 310b5f7fd6ae..f079fb1a31f7 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -134,13 +134,21 @@ enum {
enum {
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
+ MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
};
enum {
+ MLX5_SEND_WQE_DS = 16,
MLX5_SEND_WQE_BB = 64,
};
+#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
+
+enum {
+ MLX5_SEND_WQE_MAX_WQEBBS = 16,
+};
+
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
@@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg {
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+enum {
+ MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
+ MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
+ MLX5_ETH_WQE_L3_CSUM = 1 << 6,
+ MLX5_ETH_WQE_L4_CSUM = 1 << 7,
+};
+
+struct mlx5_wqe_eth_seg {
+ u8 rsvd0[4];
+ u8 cs_flags;
+ u8 rsvd1;
+ __be16 mss;
+ __be32 rsvd2;
+ __be16 inline_hdr_sz;
+ u8 inline_hdr_start[2];
+};
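
mlx5_wqe_eth_seg is the Ethernet segment of a send WQE and is what the new
MLX5_ETH_WQE_* flags above apply to: cs_flags requests L3/L4 checksum
offload (plus the _INNER_ variants for tunnelled traffic), while
inline_hdr_sz and inline_hdr_start carry the leading packet headers inline
in the WQE. A sketch of how a transmit path might derive cs_flags from skb
state (policy details are the driver's; this only shows the common pattern):

        static void sketch_fill_cs_flags(struct mlx5_wqe_eth_seg *eseg,
                                         const struct sk_buff *skb)
        {
                eseg->cs_flags = 0;
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
                                         MLX5_ETH_WQE_L4_CSUM;
                /* encapsulated packets would also set the _INNER_ flags */
        }
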
+
struct mlx5_wqe_xrc_seg {
__be32 xrc_srqn;
u8 rsvd[12];
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
new file mode 100644
index 000000000000..967e0fd06e89
--- /dev/null
+++ b/include/linux/mlx5/vport.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_VPORT_H__
+#define __MLX5_VPORT_H__
+
+#include <linux/mlx5/driver.h>
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 gid_index,
+ union ib_gid *gid);
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 pkey_index,
+ u16 *pkey);
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ u16 vf_num,
+ struct mlx5_hca_vport_context *rep);
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid);
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+ u64 *node_guid);
+
+#endif /* __MLX5_VPORT_H__ */
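
The new vport API gives the Ethernet and IB sides one place to query
per-vport attributes. mlx5_query_nic_vport_mac_address(), declared above,
fills in the vport's permanent MAC; a sketch of typical use while setting up
a netdev (the fallback policy is an assumption; helpers are from
linux/etherdevice.h):

        static void sketch_set_netdev_mac(struct net_device *netdev,
                                          struct mlx5_core_dev *mdev)
        {
                u8 mac[ETH_ALEN];

                mlx5_query_nic_vport_mac_address(mdev, mac);
                if (is_valid_ether_addr(mac))
                        ether_addr_copy(netdev->dev_addr, mac);
                else
                        eth_hw_addr_random(netdev);
        }
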
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8d37e26a1007..0038ac7466fd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -226,6 +226,24 @@ struct page_frag {
#endif
};
+#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+
+struct page_frag_cache {
+ void * va;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+ __u16 offset;
+ __u16 size;
+#else
+ __u32 offset;
+#endif
+ /* We maintain a pagecount bias so that we don't dirty the cache line
+ * containing page->_count every time we allocate a fragment.
+ */
+ unsigned int pagecnt_bias;
+ bool pfmemalloc;
+};
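
page_frag_cache generalizes the NETDEV_FRAG_PAGE_MAX_* scheme removed from
skbuff.h later in this patch: one compound page of up to 32 KiB is carved
into fragments by walking offset downward, and pagecnt_bias pre-charges the
page refcount so the allocator does not touch page->_count on every
fragment. A sketch of the fast path under those assumptions (the real
allocator also refills the cache and handles pfmemalloc pages):

        static void *sketch_page_frag_alloc(struct page_frag_cache *nc,
                                            unsigned int fragsz)
        {
                if (!nc->va || nc->offset < fragsz)
                        return NULL;    /* real code refills the page here */

                nc->offset -= fragsz;
                nc->pagecnt_bias--;
                return nc->va + nc->offset;
        }
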
+
typedef unsigned long __nocast vm_flags_t;
/*
diff --git a/include/linux/net.h b/include/linux/net.h
index 738ea48be889..04aa06852771 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -38,7 +38,6 @@ struct net;
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
-#define SOCK_EXTERNALLY_ALLOCATED 5
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -208,7 +207,7 @@ void sock_unregister(int family);
int __sock_create(struct net *net, int family, int type, int proto,
struct socket **res, int kern);
int sock_create(int family, int type, int proto, struct socket **res);
-int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
int sock_create_lite(int family, int type, int proto, struct socket **res);
void sock_release(struct socket *sock);
int sock_sendmsg(struct socket *sock, struct msghdr *msg);
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 7d59dc6ab789..9672781c593d 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -66,7 +66,6 @@ enum {
NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
NETIF_F_BUSY_POLL_BIT, /* Busy poll */
- NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */
/*
* Add your fresh new feature above and remember to update
@@ -125,7 +124,6 @@ enum {
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
-#define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD)
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
@@ -161,8 +159,7 @@ enum {
*/
#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
NETIF_F_SG | NETIF_F_HIGHDMA | \
- NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \
- NETIF_F_HW_SWITCH_OFFLOAD)
+ NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
/*
* If one device doesn't support one of these features, then disable it
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 05b9a694e213..6f5f71ff5169 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1564,7 +1564,7 @@ struct net_device {
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_SWITCHDEV
- const struct swdev_ops *swdev_ops;
+ const struct switchdev_ops *switchdev_ops;
#endif
const struct header_ops *header_ops;
@@ -1652,7 +1652,14 @@ struct net_device {
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_proto __rcu *ingress_cl_list;
+#endif
struct netdev_queue __rcu *ingress_queue;
+#ifdef CONFIG_NETFILTER_INGRESS
+ struct list_head nf_hooks_ingress;
+#endif
+
unsigned char broadcast[MAX_ADDR_LEN];
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rx_cpu_rmap;
@@ -1990,6 +1997,7 @@ struct offload_callbacks {
struct packet_offload {
__be16 type; /* This is really htons(ether_type). */
+ u16 priority;
struct offload_callbacks callbacks;
struct list_head list;
};
@@ -2552,10 +2560,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
- if (WARN_ON(!dev_queue)) {
- pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
- return;
- }
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -2571,15 +2575,7 @@ static inline void netif_stop_queue(struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
-static inline void netif_tx_stop_all_queues(struct net_device *dev)
-{
- unsigned int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- netif_tx_stop_queue(txq);
- }
-}
+void netif_tx_stop_all_queues(struct net_device *dev);
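
The loop removed above moves out of line (into net/core/dev.c in this
series) now that netif_tx_stop_queue() no longer carries the defensive
WARN; assuming the body carries over unchanged, as the removed inline
suggests, the out-of-line version is simply:

        void netif_tx_stop_all_queues(struct net_device *dev)
        {
                unsigned int i;

                for (i = 0; i < dev->num_tx_queues; i++) {
                        struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                        netif_tx_stop_queue(txq);
                }
        }
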
static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
@@ -2840,6 +2836,9 @@ static inline int netif_set_xps_queue(struct net_device *dev,
}
#endif
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
+ unsigned int num_tx_queues);
+
/*
* Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
* as a distribution range limit for the returned value.
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 63560d0a8dfe..f5ff5d156da8 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -54,10 +54,12 @@ struct nf_hook_state {
struct net_device *in;
struct net_device *out;
struct sock *sk;
+ struct list_head *hook_list;
int (*okfn)(struct sock *, struct sk_buff *);
};
static inline void nf_hook_state_init(struct nf_hook_state *p,
+ struct list_head *hook_list,
unsigned int hook,
int thresh, u_int8_t pf,
struct net_device *indev,
@@ -71,6 +73,7 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
p->in = indev;
p->out = outdev;
p->sk = sk;
+ p->hook_list = hook_list;
p->okfn = okfn;
}
@@ -79,16 +82,17 @@ typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
const struct nf_hook_state *state);
struct nf_hook_ops {
- struct list_head list;
+ struct list_head list;
/* User fills in from here down. */
- nf_hookfn *hook;
- struct module *owner;
- void *priv;
- u_int8_t pf;
- unsigned int hooknum;
+ nf_hookfn *hook;
+ struct net_device *dev;
+ struct module *owner;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
- int priority;
+ int priority;
};
struct nf_sockopt_ops {
@@ -131,26 +135,33 @@ extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+ u_int8_t pf, unsigned int hook)
{
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook))
return static_key_false(&nf_hooks_needed[pf][hook]);
- return !list_empty(&nf_hooks[pf][hook]);
+ return !list_empty(nf_hook_list);
}
#else
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+ u_int8_t pf, unsigned int hook)
{
- return !list_empty(&nf_hooks[pf][hook]);
+ return !list_empty(nf_hook_list);
}
#endif
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+ return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
+}
+
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
/**
* nf_hook_thresh - call a netfilter hook
- *
+ *
* Returns 1 if the hook has allowed the packet to pass. The function
* okfn must be invoked by the caller in this case. Any other return
* value indicates the packet has been consumed by the hook.
@@ -166,8 +177,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
if (nf_hooks_active(pf, hook)) {
struct nf_hook_state state;
- nf_hook_state_init(&state, hook, thresh, pf,
- indev, outdev, sk, okfn);
+ nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+ pf, indev, outdev, sk, okfn);
return nf_hook_slow(skb, &state);
}
return 1;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 34b172301558..ffdfdc24952a 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -122,13 +122,13 @@ struct ip_set_skbinfo {
struct ip_set;
#define ext_timeout(e, s) \
-(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
#define ext_counter(e, s) \
-(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
#define ext_comment(e, s) \
-(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
#define ext_skbinfo(e, s) \
-(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])
+((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))
typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
const struct ip_set_ext *ext,
@@ -533,29 +533,9 @@ bitmap_bytes(u32 a, u32 b)
#include <linux/netfilter/ipset/ip_set_timeout.h>
#include <linux/netfilter/ipset/ip_set_comment.h>
-static inline int
+int
ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
- const void *e, bool active)
-{
- if (SET_WITH_TIMEOUT(set)) {
- unsigned long *timeout = ext_timeout(e, set);
-
- if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
- htonl(active ? ip_set_timeout_get(timeout)
- : *timeout)))
- return -EMSGSIZE;
- }
- if (SET_WITH_COUNTER(set) &&
- ip_set_put_counter(skb, ext_counter(e, set)))
- return -EMSGSIZE;
- if (SET_WITH_COMMENT(set) &&
- ip_set_put_comment(skb, ext_comment(e, set)))
- return -EMSGSIZE;
- if (SET_WITH_SKBINFO(set) &&
- ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
- return -EMSGSIZE;
- return 0;
-}
+ const void *e, bool active);
#define IP_SET_INIT_KEXT(skb, opt, set) \
{ .bytes = (skb)->len, .packets = 1, \
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index a3e215bb0241..09f38206c18f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -62,6 +62,7 @@ struct xt_mtchk_param {
void *matchinfo;
unsigned int hook_mask;
u_int8_t family;
+ bool nft_compat;
};
/**
@@ -92,6 +93,7 @@ struct xt_tgchk_param {
void *targinfo;
unsigned int hook_mask;
u_int8_t family;
+ bool nft_compat;
};
/* Target destructor parameters */
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
new file mode 100644
index 000000000000..cb0727fe2b3d
--- /dev/null
+++ b/include/linux/netfilter_ingress.h
@@ -0,0 +1,41 @@
+#ifndef _NETFILTER_INGRESS_H_
+#define _NETFILTER_INGRESS_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
+ NFPROTO_NETDEV, NF_NETDEV_INGRESS);
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ struct nf_hook_state state;
+
+ nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
+ NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
+ skb->dev, NULL, NULL);
+ return nf_hook_slow(skb, &state);
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev)
+{
+ INIT_LIST_HEAD(&dev->nf_hooks_ingress);
+}
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev) {}
+#endif /* CONFIG_NETFILTER_INGRESS */
+#endif /* _NETFILTER_INGRESS_H_ */
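
The new ingress hook point hangs per-device hook lists off
net_device->nf_hooks_ingress (added in the netdevice.h hunk above), and
nf_hook_ops gains a dev field so a hook can be tied to one interface. A
sketch of registering such a hook (NFPROTO_NETDEV and NF_NETDEV_INGRESS are
introduced elsewhere in this series; the hook body is illustrative only):

        static unsigned int sketch_ingress_hook(const struct nf_hook_ops *ops,
                                                struct sk_buff *skb,
                                                const struct nf_hook_state *state)
        {
                /* inspect or count skb here */
                return NF_ACCEPT;
        }

        static struct nf_hook_ops sketch_ops = {
                .hook           = sketch_ingress_hook,
                .pf             = NFPROTO_NETDEV,
                .hooknum        = NF_NETDEV_INGRESS,
                .priority       = 0,
                /* .dev must point at the target net_device before
                 * nf_register_hook(&sketch_ops) is called
                 */
        };
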
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 6835c1279df7..9120edb650a0 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -28,6 +28,8 @@ struct netlink_skb_parms {
__u32 dst_group;
__u32 flags;
struct sock *sk;
+ bool nsid_is_set;
+ int nsid;
};
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2f7b9a40f627..2972c7f3aa1d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2329,6 +2329,8 @@
#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea
#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb
+#define PCI_VENDOR_ID_CAVIUM 0x177d
+
#define PCI_VENDOR_ID_BELKIN 0x1799
#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 685809835b5c..a26c3f84b8dd 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -181,6 +181,9 @@ struct mii_bus {
/* PHY addresses to be ignored when probing */
u32 phy_mask;
+ /* PHY addresses whose turnaround (TA) / read failures are ignored */
+ u32 phy_ignore_ta_mask;
+
/*
* Pointer to an array of interrupts, each PHY's
* interrupt at the index matching its address
@@ -675,6 +678,17 @@ static inline bool phy_is_internal(struct phy_device *phydev)
}
/**
+ * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
+ * is RGMII (all variants)
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
+{
+ return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
+ phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+}
+
+/**
* phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY.
* @phydev: The phy_device struct
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 7b8e260c4a27..a2324fb45cf4 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -79,17 +79,9 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_INGRESS
void net_inc_ingress_queue(void);
void net_dec_ingress_queue(void);
-#else
-static inline void net_inc_ingress_queue(void)
-{
-}
-
-static inline void net_dec_ingress_queue(void)
-{
-}
#endif
extern void rtnetlink_init(void);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f15154a879c7..a7acc92aa668 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -34,7 +34,8 @@
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
+#include <linux/splice.h>
/* A. Checksumming of received packets by device.
*
@@ -170,12 +171,14 @@ struct nf_bridge_info {
BRNF_PROTO_UNCHANGED,
BRNF_PROTO_8021Q,
BRNF_PROTO_PPPOE
- } orig_proto;
+ } orig_proto:8;
bool pkt_otherhost;
unsigned int mask;
struct net_device *physindev;
- struct net_device *physoutdev;
- char neigh_header[8];
+ union {
+ struct net_device *physoutdev;
+ char neigh_header[8];
+ };
__be32 ipv4_daddr;
};
#endif
@@ -859,6 +862,9 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
int len, int odd, struct sk_buff *skb),
void *from, int length);
+int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
+ int offset, size_t size);
+
struct skb_seq_state {
__u32 lower_offset;
__u32 upper_offset;
@@ -919,7 +925,6 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
skb->hash = hash;
}
-void __skb_get_hash(struct sk_buff *skb);
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
@@ -928,6 +933,8 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
return skb->hash;
}
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
return skb->hash;
@@ -1935,8 +1942,8 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_transport_header_was_set(skb))
return;
- else if (skb_flow_dissect(skb, &keys))
- skb_set_transport_header(skb, keys.thoff);
+ else if (skb_flow_dissect_flow_keys(skb, &keys))
+ skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
}
@@ -2127,10 +2134,6 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
-#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
-#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
-#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
-
void *netdev_alloc_frag(unsigned int fragsz);
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2185,6 +2188,11 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
+static inline void skb_free_frag(void *addr)
+{
+ __free_page_frag(addr);
+}
+
void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
unsigned int length, gfp_t gfp_mask);
@@ -2692,9 +2700,15 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int len, __wsum csum);
-int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ssize_t skb_socket_splice(struct sock *sk,
+ struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
- unsigned int flags);
+ unsigned int flags,
+ ssize_t (*splice_cb)(struct sock *,
+ struct pipe_inode_info *,
+ struct splice_pipe_desc *));
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -2729,8 +2743,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
-static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *data, int hlen, void *buffer)
+static inline void * __must_check
+__skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *data, int hlen, void *buffer)
{
if (hlen - offset >= len)
return data + offset;
@@ -2742,8 +2757,8 @@ static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
return buffer;
}
-static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
- int len, void *buffer)
+static inline void * __must_check
+skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
return __skb_header_pointer(skb, offset, len, skb->data,
skb_headlen(skb), buffer);
@@ -3050,7 +3065,7 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
}
} else if (skb->csum_bad) {
/* ip_summed == CHECKSUM_NONE in this case */
- return 1;
+ return (__force __sum16)1;
}
skb->csum = psum;
@@ -3298,9 +3313,6 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
return skb->queue_mapping != 0;
}
-u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
- unsigned int num_tx_queues);
-
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
@@ -3355,15 +3367,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
- skb_transport_offset(skb);
- __u16 csum;
+ skb_transport_offset(skb);
+ __wsum partial;
- csum = csum_fold(csum_partial(skb_transport_header(skb),
- plen, skb->csum));
+ partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
skb->csum = res;
SKB_GSO_CB(skb)->csum_start -= plen;
- return csum;
+ return csum_fold(partial);
}
static inline bool skb_is_gso(const struct sk_buff *skb)
@@ -3418,10 +3429,9 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
-
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
- const struct flow_keys *keys, int hlen);
+struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
+ unsigned int transport_len,
+ __sum16(*skb_chkf)(struct sk_buff *skb));
/**
* skb_head_is_locked - Determine if the skb->head is locked down
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 7f484a239f53..c735f5c91eea 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -99,6 +99,7 @@ struct plat_stmmacenet_data {
int phy_addr;
int interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
+ struct device_node *phy_node;
struct stmmac_dma_cfg *dma_cfg;
int clk_csr;
int has_gmac;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index e8bbf403618f..48c3696e8645 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -149,11 +149,16 @@ struct tcp_sock {
* sum(delta(rcv_nxt)), or how many bytes
* were acked.
*/
+ u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn
+ * total number of segments in.
+ */
u32 rcv_nxt; /* What we want to receive next */
u32 copied_seq; /* Head of yet unread data */
u32 rcv_wup; /* rcv_nxt on last window update sent */
u32 snd_nxt; /* Next sequence we send */
-
+ u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
* sum(delta(snd_una)), or how many bytes
* were acked.
@@ -201,6 +206,7 @@ struct tcp_sock {
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+ save_syn:1, /* Save headers of SYN packet */
is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
@@ -328,6 +334,7 @@ struct tcp_sock {
* socket. Used to retransmit SYNACKs etc.
*/
struct request_sock *fastopen_rsk;
+ u32 *saved_syn;
};
enum tsq_flags {
@@ -395,4 +402,10 @@ static inline int fastopen_init_queue(struct sock *sk, int backlog)
return 0;
}
+static inline void tcp_saved_syn_free(struct tcp_sock *tp)
+{
+ kfree(tp->saved_syn);
+ tp->saved_syn = NULL;
+}
+
#endif /* _LINUX_TCP_H */
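The saved_syn buffer is filled for a passive connection when save_syn is set on the listener, and released with tcp_saved_syn_free() once consumed. A userspace sketch, assuming the TCP_SAVE_SYN/TCP_SAVED_SYN socket options that accompany these fields in the same series (the option names come from that series, not from this header):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int example_fetch_saved_syn(int listen_fd, int conn_fd,
				   char *buf, socklen_t *len)
{
	int one = 1;

	/* must be enabled on the listener before the SYN arrives */
	if (setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN,
		       &one, sizeof(one)) < 0)
		return -1;
	/* after accept() has produced conn_fd, read back the
	 * SYN's network and TCP headers
	 */
	return getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, buf, len);
}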
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 80456f72d70a..def59d3a34d5 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -142,6 +142,7 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
void ipv6_mc_remap(struct inet6_dev *idev);
void ipv6_mc_init_dev(struct inet6_dev *idev);
void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);
void addrconf_dad_failure(struct inet6_ifaddr *ifp);
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index a175ba4a7adb..4a167b30a12f 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -39,7 +39,6 @@ struct unix_skb_parms {
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-#define UNIXSID(skb) (&UNIXCB((skb)).secid)
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 172632dd9930..db639a4c5ab8 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -74,7 +74,7 @@ void vsock_pending_work(struct work_struct *work);
struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
- gfp_t priority, unsigned short type);
+ gfp_t priority, unsigned short type, int kern);
/**** TRANSPORT ****/
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index ea6546d2c946..c28aca25320e 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -63,6 +63,9 @@ enum {
BOND_OPT_LP_INTERVAL,
BOND_OPT_SLAVES,
BOND_OPT_TLB_DYNAMIC_LB,
+ BOND_OPT_AD_ACTOR_SYS_PRIO,
+ BOND_OPT_AD_ACTOR_SYSTEM,
+ BOND_OPT_AD_USER_PORT_KEY,
BOND_OPT_LAST
};
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 78ed135e9dea..20defc0353d1 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -136,6 +136,9 @@ struct bond_params {
int packets_per_slave;
int tlb_dynamic_lb;
struct reciprocal_value reciprocal_packets_per_slave;
+ u16 ad_actor_sys_prio;
+ u16 ad_user_port_key;
+ u8 ad_actor_system[ETH_ALEN];
};
struct bond_parm_tbl {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f8d6813cd5b2..a741678f24a2 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -111,7 +111,7 @@ enum ieee80211_band {
* This may be due to the driver or due to regulatory bandwidth
* restrictions.
* @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
- * @IEEE80211_CHAN_GO_CONCURRENT: see %NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @IEEE80211_CHAN_IR_CONCURRENT: see %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
* @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
* on this channel.
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
@@ -129,7 +129,7 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_80MHZ = 1<<7,
IEEE80211_CHAN_NO_160MHZ = 1<<8,
IEEE80211_CHAN_INDOOR_ONLY = 1<<9,
- IEEE80211_CHAN_GO_CONCURRENT = 1<<10,
+ IEEE80211_CHAN_IR_CONCURRENT = 1<<10,
IEEE80211_CHAN_NO_20MHZ = 1<<11,
IEEE80211_CHAN_NO_10MHZ = 1<<12,
};
@@ -4575,13 +4575,15 @@ void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
* @ie: information elements of the deauth/disassoc frame (may be %NULL)
* @ie_len: length of IEs
* @reason: reason code for the disconnection, set it to 0 if unknown
+ * @locally_generated: disconnection was requested locally
* @gfp: allocation flags
*
* After it calls this function, the driver should enter an idle state
* and not try to connect to any AP any more.
*/
void cfg80211_disconnected(struct net_device *dev, u16 reason,
- const u8 *ie, size_t ie_len, gfp_t gfp);
+ const u8 *ie, size_t ie_len,
+ bool locally_generated, gfp_t gfp);
/**
* cfg80211_ready_on_channel - notification of remain_on_channel start
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 6ea16c84293b..290a9a69af07 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -44,6 +44,8 @@ struct cfg802154_ops {
int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
int (*set_cca_mode)(struct wpan_phy *wpan_phy,
const struct wpan_phy_cca *cca);
+ int (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level);
+ int (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power);
int (*set_pan_id)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev, __le16 pan_id);
int (*set_short_addr)(struct wpan_phy *wpan_phy,
@@ -61,14 +63,66 @@ struct cfg802154_ops {
struct wpan_dev *wpan_dev, bool mode);
};
+static inline bool
+wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st)
+{
+ switch (st) {
+ case NL802154_SUPPORTED_BOOL_TRUE:
+ return b;
+ case NL802154_SUPPORTED_BOOL_FALSE:
+ return !b;
+ case NL802154_SUPPORTED_BOOL_BOTH:
+ return true;
+ default:
+ WARN_ON(1);
+ }
+
+ return false;
+}
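The helper folds the tri-state capability report into a yes/no answer for one concrete request. A minimal sketch of a configuration path using it (the surrounding function is illustrative):

/* Reject an LBT request the PHY cannot satisfy. */
static int example_set_lbt(struct wpan_phy *phy, bool lbt_on)
{
	if (!wpan_phy_supported_bool(lbt_on, phy->supported.lbt))
		return -EINVAL;
	/* ... program the transceiver ... */
	return 0;
}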
+
+struct wpan_phy_supported {
+ u32 channels[IEEE802154_MAX_PAGE + 1],
+ cca_modes, cca_opts, iftypes;
+ enum nl802154_supported_bool_states lbt;
+ u8 min_minbe, max_minbe, min_maxbe, max_maxbe,
+ min_csma_backoffs, max_csma_backoffs;
+ s8 min_frame_retries, max_frame_retries;
+ size_t tx_powers_size, cca_ed_levels_size;
+ const s32 *tx_powers, *cca_ed_levels;
+};
+
struct wpan_phy_cca {
enum nl802154_cca_modes mode;
enum nl802154_cca_opts opt;
};
-struct wpan_phy {
- struct mutex pib_lock;
+static inline bool
+wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
+{
+ if (a->mode != b->mode)
+ return false;
+
+ if (a->mode == NL802154_CCA_ENERGY_CARRIER)
+ return a->opt == b->opt;
+ return true;
+}
+
+/**
+ * @WPAN_PHY_FLAG_TXPOWER: Indicates that transceiver will support
+ * transmit power setting.
+ * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that transceiver will support cca ed
+ * level setting.
+ * @WPAN_PHY_FLAG_CCA_MODE: Indicates that transceiver will support cca mode
+ * setting.
+ */
+enum wpan_phy_flags {
+ WPAN_PHY_FLAG_TXPOWER = BIT(1),
+ WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2),
+ WPAN_PHY_FLAG_CCA_MODE = BIT(3),
+};
+
+struct wpan_phy {
/* If multiple wpan_phys are registered and you're handed e.g.
* a regular netdev with assigned ieee802154_ptr, you won't
* know whether it points to a wpan_phy your driver has registered
@@ -77,6 +131,8 @@ struct wpan_phy {
*/
const void *privid;
+ u32 flags;
+
/*
* This is a PIB according to 802.15.4-2011.
* We do not provide timing-related variables, as they
@@ -84,12 +140,14 @@ struct wpan_phy {
*/
u8 current_channel;
u8 current_page;
- u32 channels_supported[IEEE802154_MAX_PAGE + 1];
- s8 transmit_power;
+ struct wpan_phy_supported supported;
+ /* current transmit_power in mBm */
+ s32 transmit_power;
struct wpan_phy_cca cca;
__le64 perm_extended_addr;
+ /* current cca ed threshold in mBm */
s32 cca_ed_level;
/* PHY depended MAC PIB values */
@@ -121,9 +179,9 @@ struct wpan_dev {
__le64 extended_addr;
/* MAC BSN field */
- u8 bsn;
+ atomic_t bsn;
/* MAC DSN field */
- u8 dsn;
+ atomic_t dsn;
u8 min_be;
u8 max_be;
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 0a55ac715077..2d1d73cb773e 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -122,7 +122,9 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
- *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to));
+ __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
+
+ *sum = csum_fold(csum_add(tmp, (__force __wsum)to));
}
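Behaviour is unchanged: this is still the RFC 1624 incremental update, subtracting the old 32-bit word from the unfolded checksum and adding the new one; the temporary only makes the __wsum/__be32 conversions explicit for sparse. The canonical caller rewrites a field in place:

/* NAT-style rewrite: fix up the IP header checksum incrementally
 * rather than recomputing it over the whole header.
 */
static void example_rewrite_saddr(struct iphdr *iph, __be32 new_saddr)
{
	csum_replace4(&iph->check, iph->saddr, new_saddr);
	iph->saddr = new_saddr;
}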
/* Implements RFC 1624 (Incremental Internet Checksum)
diff --git a/include/net/codel.h b/include/net/codel.h
index 1e18005f7f65..267e70210061 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -7,7 +7,7 @@
* Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
* Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
* Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
- * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -119,12 +119,14 @@ static inline u32 codel_time_to_us(codel_time_t val)
/**
* struct codel_params - contains codel parameters
* @target: target queue size (in time units)
+ * @ce_threshold: threshold for marking packets with ECN CE
* @interval: width of moving time window
* @mtu: device mtu, or minimal queue backlog in bytes.
* @ecn: is Explicit Congestion Notification enabled
*/
struct codel_params {
codel_time_t target;
+ codel_time_t ce_threshold;
codel_time_t interval;
u32 mtu;
bool ecn;
@@ -161,19 +163,24 @@ struct codel_vars {
* @maxpacket: largest packet we've seen so far
* @drop_count: temp count of dropped packets in dequeue()
* @ecn_mark: number of packets we ECN marked instead of dropping
+ * @ce_mark: number of packets CE marked because sojourn time was above ce_threshold
*/
struct codel_stats {
u32 maxpacket;
u32 drop_count;
u32 ecn_mark;
+ u32 ce_mark;
};
+#define CODEL_DISABLED_THRESHOLD INT_MAX
+
static void codel_params_init(struct codel_params *params,
const struct Qdisc *sch)
{
params->interval = MS2TIME(100);
params->target = MS2TIME(5);
params->mtu = psched_mtu(qdisc_dev(sch));
+ params->ce_threshold = CODEL_DISABLED_THRESHOLD;
params->ecn = false;
}
@@ -354,6 +361,9 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
vars->rec_inv_sqrt);
}
end:
+ if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
+ INET_ECN_set_ce(skb))
+ stats->ce_mark++;
return skb;
}
#endif
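ce_threshold adds a second, gentler congestion signal alongside CoDel's drop law: any packet whose sojourn time (vars->ldelay) exceeds the threshold is CE-marked on dequeue, independent of the target/interval state machine, and counted in ce_mark. A sketch of enabling it from a qdisc setup path (the 1 ms value is illustrative):

/* Enable DCTCP-style CE marking at a 1 ms sojourn time. */
static void example_enable_ce(struct codel_params *params,
			      const struct Qdisc *sch)
{
	codel_params_init(params, sch);	/* ce_threshold starts disabled */
	params->ce_threshold = MS2TIME(1);
}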
diff --git a/include/net/dst.h b/include/net/dst.h
index 0fb99a26e973..2bc73f8a00a9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -109,7 +109,6 @@ u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[];
#define DST_METRICS_READ_ONLY 0x1UL
-#define DST_METRICS_FORCE_OVERWRITE 0x2UL
#define DST_METRICS_FLAGS 0x3UL
#define __DST_METRICS_PTR(Y) \
((u32 *)((Y) & ~DST_METRICS_FLAGS))
@@ -120,11 +119,6 @@ static inline bool dst_metrics_read_only(const struct dst_entry *dst)
return dst->_metrics & DST_METRICS_READ_ONLY;
}
-static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst)
-{
- dst->_metrics |= DST_METRICS_FORCE_OVERWRITE;
-}
-
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
@@ -355,18 +349,6 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
__skb_tunnel_rx(skb, dev, net);
}
-/* Children define the path of the packet through the
- * Linux networking. Thus, destinations are stackable.
- */
-
-static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
-{
- struct dst_entry *child = dst_clone(skb_dst(skb)->child);
-
- skb_dst_drop(skb);
- return child;
-}
-
int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
new file mode 100644
index 000000000000..1a8c22419936
--- /dev/null
+++ b/include/net/flow_dissector.h
@@ -0,0 +1,220 @@
+#ifndef _NET_FLOW_DISSECTOR_H
+#define _NET_FLOW_DISSECTOR_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/in6.h>
+#include <uapi/linux/if_ether.h>
+
+/**
+ * struct flow_dissector_key_control:
+ * @thoff: Transport header offset
+ * @addr_type: Type of the addresses stored (e.g. FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ */
+struct flow_dissector_key_control {
+ u16 thoff;
+ u16 addr_type;
+};
+
+/**
+ * struct flow_dissector_key_basic:
+ * @n_proto: Network header protocol (eg. IPv4/IPv6)
+ * @ip_proto: Transport header protocol (eg. TCP/UDP)
+ */
+struct flow_dissector_key_basic {
+ __be16 n_proto;
+ u8 ip_proto;
+ u8 padding;
+};
+
+struct flow_dissector_key_tags {
+ u32 vlan_id:12,
+ flow_label:20;
+};
+
+struct flow_dissector_key_keyid {
+ __be32 keyid;
+};
+
+/**
+ * struct flow_dissector_key_ipv4_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv4_addrs {
+ /* (src,dst) must be grouped, in the same way as in the IP header */
+ __be32 src;
+ __be32 dst;
+};
+
+/**
+ * struct flow_dissector_key_ipv6_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv6_addrs {
+ /* (src,dst) must be grouped, in the same way as in the IP header */
+ struct in6_addr src;
+ struct in6_addr dst;
+};
+
+/**
+ * struct flow_dissector_key_tipc_addrs:
+ * @srcnode: source node address
+ */
+struct flow_dissector_key_tipc_addrs {
+ __be32 srcnode;
+};
+
+/**
+ * struct flow_dissector_key_addrs:
+ * @v4addrs: IPv4 addresses
+ * @v6addrs: IPv6 addresses
+ * @tipcaddrs: TIPC addresses
+ */
+struct flow_dissector_key_addrs {
+ union {
+ struct flow_dissector_key_ipv4_addrs v4addrs;
+ struct flow_dissector_key_ipv6_addrs v6addrs;
+ struct flow_dissector_key_tipc_addrs tipcaddrs;
+ };
+};
+
+/**
+ * struct flow_dissector_key_ports:
+ * @ports: port numbers of Transport header
+ * src: source port number
+ * dst: destination port number
+ */
+struct flow_dissector_key_ports {
+ union {
+ __be32 ports;
+ struct {
+ __be16 src;
+ __be16 dst;
+ };
+ };
+};
+
+
+/**
+ * struct flow_dissector_key_eth_addrs:
+ * @src: source Ethernet address
+ * @dst: destination Ethernet address
+ */
+struct flow_dissector_key_eth_addrs {
+ /* (dst,src) must be grouped, in the same way as in the Ethernet header */
+ unsigned char dst[ETH_ALEN];
+ unsigned char src[ETH_ALEN];
+};
+
+enum flow_dissector_key_id {
+ FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
+ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
+ FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
+ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
+ FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
+ FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_tags */
+ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */
+ FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
+ FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
+
+ FLOW_DISSECTOR_KEY_MAX,
+};
+
+struct flow_dissector_key {
+ enum flow_dissector_key_id key_id;
+ size_t offset; /* offset of struct flow_dissector_key_*
+ in the target struct */
+};
+
+struct flow_dissector {
+ unsigned int used_keys; /* each bit represents presence of one key id */
+ unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
+};
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+ const struct flow_dissector_key *key,
+ unsigned int key_count);
+
+bool __skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container,
+ void *data, __be16 proto, int nhoff, int hlen);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container)
+{
+ return __skb_flow_dissect(skb, flow_dissector, target_container,
+ NULL, 0, 0, 0);
+}
+
+struct flow_keys {
+ struct flow_dissector_key_control control;
+#define FLOW_KEYS_HASH_START_FIELD basic
+ struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_tags tags;
+ struct flow_dissector_key_keyid keyid;
+ struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_addrs addrs;
+};
+
+#define FLOW_KEYS_HASH_OFFSET \
+ offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD)
+
+__be32 flow_get_u32_src(const struct flow_keys *flow);
+__be32 flow_get_u32_dst(const struct flow_keys *flow);
+
+extern struct flow_dissector flow_keys_dissector;
+extern struct flow_dissector flow_keys_buf_dissector;
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+ struct flow_keys *flow)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+ NULL, 0, 0, 0);
+}
+
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+ void *data, __be16 proto,
+ int nhoff, int hlen)
+{
+ memset(flow, 0, sizeof(*flow));
+ return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+ data, proto, nhoff, hlen);
+}
+
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+ int thoff, u8 ip_proto)
+{
+ return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+u32 flow_hash_from_keys(struct flow_keys *keys);
+void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ const struct flow_keys *keys, int hlen);
+
+/* struct flow_keys_digest:
+ *
+ * This structure is used to hold a digest of the full flow keys. This is a
+ * larger "hash" of a flow to allow definitively matching specific flows where
+ * the 32-bit skb->hash is not large enough. The size is limited to 16 bytes so
+ * that it can be used in the CB of an skb (see sch_choke for an example).
+ */
+#define FLOW_KEYS_DIGEST_LEN 16
+struct flow_keys_digest {
+ u8 data[FLOW_KEYS_DIGEST_LEN];
+};
+
+void make_flow_keys_digest(struct flow_keys_digest *digest,
+ const struct flow_keys *flow);
+
+#endif
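Most callers can use the canned flow_keys dissector; the generic entry points matter when only selected keys should be extracted into a caller-defined layout. A minimal sketch of both uses (the example_* names and the key table are illustrative):

/* Full dissection into struct flow_keys, e.g. for hashing. */
static bool example_hash_skb(struct sk_buff *skb, u32 *hash)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys))
		return false;
	*hash = flow_hash_from_keys(&keys);
	return true;
}

/* Custom dissector filling only control/basic keys into a private
 * target structure, laid out by the key table below.
 */
struct example_target {
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
};

static const struct flow_dissector_key example_keys[] = {
	{ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
	  .offset = offsetof(struct example_target, control) },
	{ .key_id = FLOW_DISSECTOR_KEY_BASIC,
	  .offset = offsetof(struct example_target, basic) },
};

static struct flow_dissector example_dissector;

static void example_init(void)
{
	skb_flow_dissector_init(&example_dissector, example_keys,
				ARRAY_SIZE(example_keys));
}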
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
deleted file mode 100644
index dc8fd81412bf..000000000000
--- a/include/net/flow_keys.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef _NET_FLOW_KEYS_H
-#define _NET_FLOW_KEYS_H
-
-/* struct flow_keys:
- * @src: source ip address in case of IPv4
- * For IPv6 it contains 32bit hash of src address
- * @dst: destination ip address in case of IPv4
- * For IPv6 it contains 32bit hash of dst address
- * @ports: port numbers of Transport header
- * port16[0]: src port number
- * port16[1]: dst port number
- * @thoff: Transport header offset
- * @n_proto: Network header protocol (eg. IPv4/IPv6)
- * @ip_proto: Transport header protocol (eg. TCP/UDP)
- * All the members, except thoff, are in network byte order.
- */
-struct flow_keys {
- /* (src,dst) must be grouped, in the same way than in IP header */
- __be32 src;
- __be32 dst;
- union {
- __be32 ports;
- __be16 port16[2];
- };
- u16 thoff;
- __be16 n_proto;
- u8 ip_proto;
-};
-
-bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
- void *data, __be16 proto, int nhoff, int hlen);
-static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
-{
- return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0);
-}
-__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
- void *data, int hlen_proto);
-static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
-{
- return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
-}
-u32 flow_hash_from_keys(struct flow_keys *keys);
-unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
- __be16 protocol);
-#endif
diff --git a/include/net/geneve.h b/include/net/geneve.h
index 14fb8d3390b4..2a0543a1899d 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,6 +62,11 @@ struct genevehdr {
struct geneve_opt options[];
};
+static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
+{
+ return (struct genevehdr *)(udp_hdr(skb) + 1);
+}
+
#ifdef CONFIG_INET
struct geneve_sock;
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 94a297052442..0a87975128ec 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -422,16 +422,6 @@ struct ieee802154_mlme_ops {
struct ieee802154_mac_params *params);
struct ieee802154_llsec_ops *llsec;
-
- /* The fields below are required. */
-
- /*
- * FIXME: these should become the part of PIB/MIB interface.
- * However we still don't have IB interface of any kind
- */
- __le16 (*get_pan_id)(const struct net_device *dev);
- __le16 (*get_short_addr)(const struct net_device *dev);
- u8 (*get_dsn)(const struct net_device *dev);
};
static inline struct ieee802154_mlme_ops *
@@ -440,10 +430,4 @@ ieee802154_mlme_ops(const struct net_device *dev)
return dev->ml_priv;
}
-static inline struct ieee802154_reduced_mlme_ops *
-ieee802154_reduced_mlme_ops(const struct net_device *dev)
-{
- return dev->ml_priv;
-}
-
#endif
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 4a92423eefa5..279f83591971 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -41,7 +41,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
static inline void inet_ctl_sock_destroy(struct sock *sk)
{
- sk_release_kernel(sk);
+ sock_release(sk->sk_socket);
}
#endif
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 8d1765577acc..e1300b3dd597 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -43,7 +43,7 @@ enum {
* @len: total length of the original datagram
* @meat: length of received fragments so far
* @flags: fragment queue flags
- * @max_size: (ipv4 only) maximum received fragment size with IP_DF set
+ * @max_size: maximum received fragment size
* @net: namespace that this frag belongs to
*/
struct inet_frag_queue {
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 73fe0f9525d9..b73c88a19dd4 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
-#include <linux/vmalloc.h>
#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
@@ -148,8 +147,6 @@ struct inet_hashinfo {
*/
struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE]
____cacheline_aligned_in_smp;
-
- atomic_t bsockets;
};
static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -166,52 +163,12 @@ static inline spinlock_t *inet_ehash_lockp(
return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
-static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
-{
- unsigned int i, size = 256;
-#if defined(CONFIG_PROVE_LOCKING)
- unsigned int nr_pcpus = 2;
-#else
- unsigned int nr_pcpus = num_possible_cpus();
-#endif
- if (nr_pcpus >= 4)
- size = 512;
- if (nr_pcpus >= 8)
- size = 1024;
- if (nr_pcpus >= 16)
- size = 2048;
- if (nr_pcpus >= 32)
- size = 4096;
- if (sizeof(spinlock_t) != 0) {
-#ifdef CONFIG_NUMA
- if (size * sizeof(spinlock_t) > PAGE_SIZE)
- hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
- else
-#endif
- hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t),
- GFP_KERNEL);
- if (!hashinfo->ehash_locks)
- return ENOMEM;
- for (i = 0; i < size; i++)
- spin_lock_init(&hashinfo->ehash_locks[i]);
- }
- hashinfo->ehash_locks_mask = size - 1;
- return 0;
-}
+int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
- if (hashinfo->ehash_locks) {
-#ifdef CONFIG_NUMA
- unsigned int size = (hashinfo->ehash_locks_mask + 1) *
- sizeof(spinlock_t);
- if (size > PAGE_SIZE)
- vfree(hashinfo->ehash_locks);
- else
-#endif
- kfree(hashinfo->ehash_locks);
- hashinfo->ehash_locks = NULL;
- }
+ kvfree(hashinfo->ehash_locks);
+ hashinfo->ehash_locks = NULL;
}
struct inet_bind_bucket *
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index b6c3737da4e9..47eb67b08abd 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -187,6 +187,7 @@ struct inet_sock {
transparent:1,
mc_all:1,
nodefrag:1;
+ __u8 bind_address_no_port:1;
__u8 rcv_tos;
__u8 convert_csum;
int uc_index;
diff --git a/include/net/ip.h b/include/net/ip.h
index d14af7edd197..0750a186ea63 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -31,7 +31,7 @@
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
struct sock;
@@ -45,6 +45,7 @@ struct inet_skb_parm {
#define IPSKB_FRAG_COMPLETE BIT(3)
#define IPSKB_REROUTED BIT(4)
#define IPSKB_DOREDIRECT BIT(5)
+#define IPSKB_FRAG_PMTU BIT(6)
u16 frag_max_size;
};
@@ -108,9 +109,8 @@ int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct sock *sk, struct sk_buff *skb);
-int ip_fragment(struct sock *sk, struct sk_buff *skb,
- int (*output)(struct sock *, struct sk_buff *));
-int ip_do_nat(struct sk_buff *skb);
+int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *));
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct sk_buff *skb);
int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
@@ -355,15 +355,32 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
skb->len, proto, 0);
}
+/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to : flow->v4addrs.src = iph->saddr;
+ * flow->v4addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
+ const struct iphdr *iph)
+{
+ BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
+ offsetof(typeof(flow->addrs), v4addrs.src) +
+ sizeof(flow->addrs.v4addrs.src));
+ memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+}
+
static inline void inet_set_txhash(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct flow_keys keys;
- keys.src = inet->inet_saddr;
- keys.dst = inet->inet_daddr;
- keys.port16[0] = inet->inet_sport;
- keys.port16[1] = inet->inet_dport;
+ memset(&keys, 0, sizeof(keys));
+
+ keys.addrs.v4addrs.src = inet->inet_saddr;
+ keys.addrs.v4addrs.dst = inet->inet_daddr;
+ keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ keys.ports.src = inet->inet_sport;
+ keys.ports.dst = inet->inet_dport;
sk->sk_txhash = flow_hash_from_keys(&keys);
}
@@ -478,6 +495,16 @@ enum ip_defrag_users {
IP_DEFRAG_MACVLAN,
};
+/* Return true if the value of 'user' is between 'lower_bond'
+ * and 'upper_bond', inclusive.
+ */
+static inline bool ip_defrag_user_in_between(u32 user,
+ enum ip_defrag_users lower_bond,
+ enum ip_defrag_users upper_bond)
+{
+ return user >= lower_bond && user <= upper_bond;
+}
+
int ip_defrag(struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 20e80fa7bbdd..3b76849c190f 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -120,45 +120,19 @@ struct rt6_info {
struct rt6key rt6i_src;
struct rt6key rt6i_prefsrc;
+ struct list_head rt6i_uncached;
+ struct uncached_list *rt6i_uncached_list;
+
struct inet6_dev *rt6i_idev;
- unsigned long _rt6i_peer;
+ struct rt6_info * __percpu *rt6i_pcpu;
u32 rt6i_metric;
+ u32 rt6i_pmtu;
/* more non-fragment space at head required */
unsigned short rt6i_nfheader_len;
u8 rt6i_protocol;
};
-static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt)
-{
- return inetpeer_ptr(rt->_rt6i_peer);
-}
-
-static inline bool rt6_has_peer(struct rt6_info *rt)
-{
- return inetpeer_ptr_is_peer(rt->_rt6i_peer);
-}
-
-static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
-{
- __inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
-}
-
-static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
-{
- return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
-}
-
-static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base)
-{
- inetpeer_init_ptr(&rt->_rt6i_peer, base);
-}
-
-static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort)
-{
- inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer);
-}
-
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
{
return ((struct rt6_info *)dst)->rt6i_idev;
@@ -189,13 +163,12 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
rt0->rt6i_flags |= RTF_EXPIRES;
}
-static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
+static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
- struct dst_entry *new = (struct dst_entry *) from;
+ if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
+ rt = (struct rt6_info *)(rt->dst.from);
- rt->rt6i_flags &= ~RTF_EXPIRES;
- dst_hold(new);
- rt->dst.from = new;
+ return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
}
static inline void ip6_rt_put(struct rt6_info *rt)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 5e192068e6cb..297629aadb19 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -145,7 +145,7 @@ static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
#ifdef CONFIG_IPV6_SUBTREES
np->saddr_cache = saddr;
#endif
- np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ np->dst_cookie = rt6_get_cookie(rt);
}
static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
@@ -163,11 +163,14 @@ static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
return rt->rt6i_flags & RTF_LOCAL;
}
-static inline bool ipv6_anycast_destination(const struct sk_buff *skb)
+static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
+ const struct in6_addr *daddr)
{
- struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
+ struct rt6_info *rt = (struct rt6_info *)dst;
- return rt->rt6i_flags & RTF_ANYCAST;
+ return rt->rt6i_flags & RTF_ANYCAST ||
+ (rt->rt6i_dst.plen != 128 &&
+ ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
}
int ip6_fragment(struct sock *sk, struct sk_buff *skb,
@@ -194,9 +197,15 @@ static inline bool ip6_sk_ignore_df(const struct sock *sk)
inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
}
-static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
+ struct in6_addr *daddr)
{
- return &rt->rt6i_gateway;
+ if (rt->rt6i_flags & RTF_GATEWAY)
+ return &rt->rt6i_gateway;
+ else if (unlikely(rt->rt6i_flags & RTF_CACHE))
+ return &rt->rt6i_dst.addr;
+ else
+ return daddr;
}
#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index eec8ad3c9843..82dbdb092a5d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -19,7 +19,7 @@
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/flow.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
#include <net/snmp.h>
#define SIN6_LEN_RFC2133 24
@@ -239,8 +239,10 @@ struct ip6_flowlabel {
struct net *fl_net;
};
-#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
-#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
+#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF)
+#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
+#define IPV6_FLOWLABEL_STATELESS_FLAG cpu_to_be32(0x00080000)
+
#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20
@@ -669,8 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}
-void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr,
- struct rt6_info *rt);
+__be32 ipv6_select_ident(struct net *net,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -689,6 +692,20 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
return hlimit;
}
+/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to : flow->v6addrs.src = iph->saddr;
+ * flow->v6addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
+ const struct ipv6hdr *iph)
+{
+ BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
+ offsetof(typeof(flow->addrs), v6addrs.src) +
+ sizeof(flow->addrs.v6addrs.src));
+ memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
+ flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static inline void ip6_set_txhash(struct sock *sk)
{
@@ -696,10 +713,15 @@ static inline void ip6_set_txhash(struct sock *sk)
struct ipv6_pinfo *np = inet6_sk(sk);
struct flow_keys keys;
- keys.src = (__force __be32)ipv6_addr_hash(&np->saddr);
- keys.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr);
- keys.port16[0] = inet->inet_sport;
- keys.port16[1] = inet->inet_dport;
+ memset(&keys, 0, sizeof(keys));
+
+ memcpy(&keys.addrs.v6addrs.src, &np->saddr,
+ sizeof(keys.addrs.v6addrs.src));
+ memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
+ sizeof(keys.addrs.v6addrs.dst));
+ keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ keys.ports.src = inet->inet_sport;
+ keys.ports.dst = inet->inet_dport;
sk->sk_txhash = flow_hash_from_keys(&keys);
}
@@ -719,6 +741,9 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
hash ^= hash >> 12;
flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+
+ if (net->ipv6.sysctl.flowlabel_state_ranges)
+ flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
}
return flowlabel;
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 0134681acc4c..fe994d2e5286 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -96,7 +96,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
}
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
- struct proto *prot);
+ struct proto *prot, int kern);
void llc_sk_free(struct sock *sk);
void llc_sk_reset(struct sock *sk);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index fc57f6b82fc5..6b1077c2a63f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -337,10 +337,16 @@ enum ieee80211_bss_change {
* enum ieee80211_event_type - event to be notified to the low level driver
* @RSSI_EVENT: AP's rssi crossed a threshold set by the driver.
* @MLME_EVENT: event related to MLME
+ * @BAR_RX_EVENT: a BAR was received
+ * @BA_FRAME_TIMEOUT: Frames were released from the reordering buffer because
+ * they timed out. This won't be called for each frame released, but only
+ * once each time the timeout triggers.
*/
enum ieee80211_event_type {
RSSI_EVENT,
MLME_EVENT,
+ BAR_RX_EVENT,
+ BA_FRAME_TIMEOUT,
};
/**
@@ -400,17 +406,31 @@ struct ieee80211_mlme_event {
};
/**
+ * struct ieee80211_ba_event - data attached for BlockAck related events
+ * @sta: pointer to the &ieee80211_sta to which this event relates
+ * @tid: the tid
+ * @ssn: the starting sequence number (for %BAR_RX_EVENT)
+ */
+struct ieee80211_ba_event {
+ struct ieee80211_sta *sta;
+ u16 tid;
+ u16 ssn;
+};
+
+/**
* struct ieee80211_event - event to be sent to the driver
* @type: The event itself. See &enum ieee80211_event_type.
* @rssi: relevant if &type is %RSSI_EVENT
* @mlme: relevant if &type is %MLME_EVENT
- * @u: union holding the above two fields
+ * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
+ * @u: union holding the fields above
*/
struct ieee80211_event {
enum ieee80211_event_type type;
union {
struct ieee80211_rssi_event rssi;
struct ieee80211_mlme_event mlme;
+ struct ieee80211_ba_event ba;
} u;
};
@@ -426,12 +446,8 @@ struct ieee80211_event {
* @ibss_creator: indicates if a new IBSS network is being created
* @aid: association ID number, valid only when @assoc is true
* @use_cts_prot: use CTS protection
- * @use_short_preamble: use 802.11b short preamble;
- * if the hardware cannot handle this it must set the
- * IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE hardware flag
- * @use_short_slot: use short slot time (only relevant for ERP);
- * if the hardware cannot handle this it must set the
- * IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag
+ * @use_short_preamble: use 802.11b short preamble
+ * @use_short_slot: use short slot time (only relevant for ERP)
* @dtim_period: num of beacons before the next DTIM, for beaconing,
* valid in station mode only if after the driver was notified
* with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then.
@@ -855,6 +871,9 @@ struct ieee80211_tx_info {
/* 4 bytes free */
} control;
struct {
+ u64 cookie;
+ } ack;
+ struct {
struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
s32 ack_signal;
u8 ampdu_ack_len;
@@ -1459,6 +1478,9 @@ enum ieee80211_key_flags {
* wants to be given when a frame is transmitted and needs to be
* encrypted in hardware.
* @cipher: The key's cipher suite selector.
+ * @tx_pn: PN used for TX on non-TKIP keys, may be used by the driver
+ * as well if it needs to do software PN assignment by itself
+ * (e.g. due to TSO)
* @flags: key flags, see &enum ieee80211_key_flags.
* @keyidx: the key index (0-3)
* @keylen: key material length
@@ -1471,6 +1493,7 @@ enum ieee80211_key_flags {
* @iv_len: The IV length for this key type
*/
struct ieee80211_key_conf {
+ atomic64_t tx_pn;
u32 cipher;
u8 icv_len;
u8 iv_len;
@@ -1481,6 +1504,47 @@ struct ieee80211_key_conf {
u8 key[0];
};
+#define IEEE80211_MAX_PN_LEN 16
+
+/**
+ * struct ieee80211_key_seq - key sequence counter
+ *
+ * @tkip: TKIP data, containing IV32 and IV16 in host byte order
+ * @ccmp: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @aes_cmac: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @aes_gmac: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @gcmp: PN data, most significant byte first (big endian,
+ * reverse order than in packet)
+ * @hw: data for HW-only (e.g. cipher scheme) keys
+ */
+struct ieee80211_key_seq {
+ union {
+ struct {
+ u32 iv32;
+ u16 iv16;
+ } tkip;
+ struct {
+ u8 pn[6];
+ } ccmp;
+ struct {
+ u8 pn[6];
+ } aes_cmac;
+ struct {
+ u8 pn[6];
+ } aes_gmac;
+ struct {
+ u8 pn[6];
+ } gcmp;
+ struct {
+ u8 seq[IEEE80211_MAX_PN_LEN];
+ u8 seq_len;
+ } hw;
+ };
+};
+
/**
* struct ieee80211_cipher_scheme - cipher scheme
*
@@ -1667,8 +1731,7 @@ struct ieee80211_tx_control {
* @sta: station table entry, %NULL for per-vif queue
* @tid: the TID for this queue (unused for per-vif queue)
* @ac: the AC for this queue
- * @drv_priv: data area for driver use, will always be aligned to
- * sizeof(void *).
+ * @drv_priv: driver private area, sized by hw->txq_data_size
*
* The driver can obtain packets from this queue by calling
* ieee80211_tx_dequeue().
@@ -1717,13 +1780,6 @@ struct ieee80211_txq {
* multicast frames when there are power saving stations so that
* the driver can fetch them with ieee80211_get_buffered_bc().
*
- * @IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE:
- * Hardware is not capable of short slot operation on the 2.4 GHz band.
- *
- * @IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE:
- * Hardware is not capable of receiving frames with short preamble on
- * the 2.4 GHz band.
- *
* @IEEE80211_HW_SIGNAL_UNSPEC:
* Hardware can provide signal values but we don't know its units. We
* expect values between 0 and @max_signal.
@@ -1798,6 +1854,10 @@ struct ieee80211_txq {
* the driver returns 1. This also forces the driver to advertise its
* supported cipher suites.
*
+ * @IEEE80211_HW_SUPPORT_FAST_XMIT: The driver/hardware supports fast-xmit,
+ * this currently requires only the ability to calculate the duration
+ * for frames.
+ *
* @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
* queue mapping in order to use different queues (not just one per AC)
* for different virtual interfaces. See the doc section on HW queue
@@ -1825,41 +1885,44 @@ struct ieee80211_txq {
* @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload
* or tailroom of TX skbs without copying them first.
*
- * @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
+ * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
* in one command, mac80211 doesn't have to run separate scans per band.
+ *
+ * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
- IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
- IEEE80211_HW_RX_INCLUDES_FCS = 1<<1,
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2,
- IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3,
- IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4,
- IEEE80211_HW_SIGNAL_UNSPEC = 1<<5,
- IEEE80211_HW_SIGNAL_DBM = 1<<6,
- IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC = 1<<7,
- IEEE80211_HW_SPECTRUM_MGMT = 1<<8,
- IEEE80211_HW_AMPDU_AGGREGATION = 1<<9,
- IEEE80211_HW_SUPPORTS_PS = 1<<10,
- IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11,
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
- IEEE80211_HW_MFP_CAPABLE = 1<<13,
- IEEE80211_HW_WANT_MONITOR_VIF = 1<<14,
- IEEE80211_HW_NO_AUTO_VIF = 1<<15,
- IEEE80211_HW_SW_CRYPTO_CONTROL = 1<<16,
- /* free slots */
- IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
- IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
- IEEE80211_HW_QUEUE_CONTROL = 1<<20,
- IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21,
- IEEE80211_HW_AP_LINK_PS = 1<<22,
- IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
- IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24,
- IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
- IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
- IEEE80211_HW_CHANCTX_STA_CSA = 1<<28,
- IEEE80211_HW_SUPPORTS_CLONED_SKBS = 1<<29,
- IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS = 1<<30,
+ IEEE80211_HW_HAS_RATE_CONTROL,
+ IEEE80211_HW_RX_INCLUDES_FCS,
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING,
+ IEEE80211_HW_SIGNAL_UNSPEC,
+ IEEE80211_HW_SIGNAL_DBM,
+ IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC,
+ IEEE80211_HW_SPECTRUM_MGMT,
+ IEEE80211_HW_AMPDU_AGGREGATION,
+ IEEE80211_HW_SUPPORTS_PS,
+ IEEE80211_HW_PS_NULLFUNC_STACK,
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS,
+ IEEE80211_HW_MFP_CAPABLE,
+ IEEE80211_HW_WANT_MONITOR_VIF,
+ IEEE80211_HW_NO_AUTO_VIF,
+ IEEE80211_HW_SW_CRYPTO_CONTROL,
+ IEEE80211_HW_SUPPORT_FAST_XMIT,
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS,
+ IEEE80211_HW_CONNECTION_MONITOR,
+ IEEE80211_HW_QUEUE_CONTROL,
+ IEEE80211_HW_SUPPORTS_PER_STA_GTK,
+ IEEE80211_HW_AP_LINK_PS,
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW,
+ IEEE80211_HW_SUPPORTS_RC_TABLE,
+ IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF,
+ IEEE80211_HW_TIMING_BEACON_ONLY,
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES,
+ IEEE80211_HW_CHANCTX_STA_CSA,
+ IEEE80211_HW_SUPPORTS_CLONED_SKBS,
+ IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+
+ /* keep last, obviously */
+ NUM_IEEE80211_HW_FLAGS
};
/**
@@ -1940,8 +2003,8 @@ enum ieee80211_hw_flags {
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
*
* @netdev_features: netdev features to be set in each netdev created
- * from this HW. Note only HW checksum features are currently
- * compatible with mac80211. Other feature bits will be rejected.
+ * from this HW. Note that not all features are usable with mac80211,
+ * other features will be rejected during HW registration.
*
* @uapsd_queues: This bitmap is included in (re)association frame to indicate
* for each access category if it is uAPSD trigger-enabled and delivery-
@@ -1966,7 +2029,7 @@ struct ieee80211_hw {
struct wiphy *wiphy;
const char *rate_control_algorithm;
void *priv;
- u32 flags;
+ unsigned long flags[BITS_TO_LONGS(NUM_IEEE80211_HW_FLAGS)];
unsigned int extra_tx_headroom;
unsigned int extra_beacon_tailroom;
int vif_data_size;
@@ -1992,6 +2055,20 @@ struct ieee80211_hw {
int txq_ac_max_pending;
};
+static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
+ enum ieee80211_hw_flags flg)
+{
+ return test_bit(flg, hw->flags);
+}
+#define ieee80211_hw_check(hw, flg) _ieee80211_hw_check(hw, IEEE80211_HW_##flg)
+
+static inline void _ieee80211_hw_set(struct ieee80211_hw *hw,
+ enum ieee80211_hw_flags flg)
+{
+ return __set_bit(flg, hw->flags);
+}
+#define ieee80211_hw_set(hw, flg) _ieee80211_hw_set(hw, IEEE80211_HW_##flg)
+
/**
* struct ieee80211_scan_request - hw scan request
*
@@ -2505,10 +2582,6 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* stack. It is always safe to pass more frames than requested,
* but this has negative impact on power consumption.
*
- * @FIF_PROMISC_IN_BSS: promiscuous mode within your BSS,
- * think of the BSS as your network segment and then this corresponds
- * to the regular ethernet device promiscuous mode.
- *
* @FIF_ALLMULTI: pass all multicast frames, this is used if requested
* by the user or if the hardware is not capable of filtering by
* multicast address.
@@ -2525,18 +2598,16 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* mac80211 needs to do and the amount of CPU wakeups, so you should
* honour this flag if possible.
*
- * @FIF_CONTROL: pass control frames (except for PS Poll), if PROMISC_IN_BSS
- * is not set then only those addressed to this station.
+ * @FIF_CONTROL: pass control frames (except for PS Poll) addressed to this
+ * station
*
* @FIF_OTHER_BSS: pass frames destined to other BSSes
*
- * @FIF_PSPOLL: pass PS Poll frames, if PROMISC_IN_BSS is not set then only
- * those addressed to this station.
+ * @FIF_PSPOLL: pass PS Poll frames
*
* @FIF_PROBE_REQ: pass probe request frames
*/
enum ieee80211_filter_flags {
- FIF_PROMISC_IN_BSS = 1<<0,
FIF_ALLMULTI = 1<<1,
FIF_FCSFAIL = 1<<2,
FIF_PLCPFAIL = 1<<3,
@@ -2819,9 +2890,9 @@ enum ieee80211_reconfig_type {
* Returns zero if statistics are available.
* The callback can sleep.
*
- * @get_tkip_seq: If your device implements TKIP encryption in hardware this
- * callback should be provided to read the TKIP transmit IVs (both IV32
- * and IV16) for the given key from hardware.
+ * @get_key_seq: If your device implements encryption in hardware and does
+ * IV/PN assignment then this callback should be provided to read the
+ * IV/PN for the given key from hardware.
* The callback must be atomic.
*
* @set_frag_threshold: Configuration of fragmentation threshold. Assign this
@@ -3004,7 +3075,7 @@ enum ieee80211_reconfig_type {
* The callback can sleep.
* @event_callback: Notify driver about any event in mac80211. See
* &enum ieee80211_event_type for the different types.
- * The callback can sleep.
+ * The callback must be atomic.
*
* @release_buffered_frames: Release buffered frames according to the given
* parameters. In the case where the driver buffers some frames for
@@ -3220,8 +3291,9 @@ struct ieee80211_ops {
struct ieee80211_vif *vif);
int (*get_stats)(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
- void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx,
- u32 *iv32, u16 *iv16);
+ void (*get_key_seq)(struct ieee80211_hw *hw,
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq);
int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
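A driver that lets the new atomic tx_pn counter drive CCMP PN assignment can implement get_key_seq by serialising that counter most-significant byte first, matching the struct ieee80211_key_seq layout introduced above. A sketch under that assumption (CCMP only; a real driver would switch on key->cipher):

static void example_get_key_seq(struct ieee80211_hw *hw,
				struct ieee80211_key_conf *key,
				struct ieee80211_key_seq *seq)
{
	u64 pn = atomic64_read(&key->tx_pn);
	int i;

	/* seq->ccmp.pn is big endian: most significant byte first */
	for (i = 5; i >= 0; i--) {
		seq->ccmp.pn[i] = pn & 0xff;
		pn >>= 8;
	}
}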
@@ -3469,14 +3541,15 @@ enum ieee80211_tpt_led_trigger_flags {
};
#ifdef CONFIG_MAC80211_LEDS
-char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
- unsigned int flags,
- const struct ieee80211_tpt_blink *blink_table,
- unsigned int blink_table_len);
+const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
+const char *
+__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+ unsigned int flags,
+ const struct ieee80211_tpt_blink *blink_table,
+ unsigned int blink_table_len);
#endif
/**
* ieee80211_get_tx_led_name - get name of TX LED
@@ -3490,7 +3563,7 @@ char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_tx_led_name(hw);
@@ -3511,7 +3584,7 @@ static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_rx_led_name(hw);
@@ -3532,7 +3605,7 @@ static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_assoc_led_name(hw);
@@ -3553,7 +3626,7 @@ static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
*
* Return: The name of the LED trigger. %NULL if not configured for LEDs.
*/
-static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
{
#ifdef CONFIG_MAC80211_LEDS
return __ieee80211_get_radio_led_name(hw);
@@ -3574,7 +3647,7 @@ static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
*
* Note: This function must be called before ieee80211_register_hw().
*/
-static inline char *
+static inline const char *
ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags,
const struct ieee80211_tpt_blink *blink_table,
unsigned int blink_table_len)
@@ -4255,40 +4328,6 @@ void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
u8 *k1, u8 *k2);
/**
- * struct ieee80211_key_seq - key sequence counter
- *
- * @tkip: TKIP data, containing IV32 and IV16 in host byte order
- * @ccmp: PN data, most significant byte first (big endian,
- * reverse order than in packet)
- * @aes_cmac: PN data, most significant byte first (big endian,
- * reverse order than in packet)
- * @aes_gmac: PN data, most significant byte first (big endian,
- * reverse order than in packet)
- * @gcmp: PN data, most significant byte first (big endian,
- * reverse order than in packet)
- */
-struct ieee80211_key_seq {
- union {
- struct {
- u32 iv32;
- u16 iv16;
- } tkip;
- struct {
- u8 pn[6];
- } ccmp;
- struct {
- u8 pn[6];
- } aes_cmac;
- struct {
- u8 pn[6];
- } aes_gmac;
- struct {
- u8 pn[6];
- } gcmp;
- };
-};
-
-/**
* ieee80211_get_key_tx_seq - get key TX sequence counter
*
* @keyconf: the parameter passed with the set key
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 7df28a4c23f9..9605c7f7453f 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -89,41 +89,26 @@ struct ieee802154_hw {
#define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001
/* Indicates that receiver will autorespond with ACK frames. */
#define IEEE802154_HW_AACK 0x00000002
-/* Indicates that transceiver will support transmit power setting. */
-#define IEEE802154_HW_TXPOWER 0x00000004
/* Indicates that transceiver will support listen before transmit. */
-#define IEEE802154_HW_LBT 0x00000008
-/* Indicates that transceiver will support cca mode setting. */
-#define IEEE802154_HW_CCA_MODE 0x00000010
-/* Indicates that transceiver will support cca ed level setting. */
-#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020
+#define IEEE802154_HW_LBT 0x00000004
/* Indicates that transceiver will support csma (max_be, min_be, csma retries)
* settings. */
-#define IEEE802154_HW_CSMA_PARAMS 0x00000040
+#define IEEE802154_HW_CSMA_PARAMS 0x00000008
/* Indicates that transceiver will support ARET frame retries setting. */
-#define IEEE802154_HW_FRAME_RETRIES 0x00000080
+#define IEEE802154_HW_FRAME_RETRIES 0x00000010
/* Indicates that transceiver will support hardware address filter setting. */
-#define IEEE802154_HW_AFILT 0x00000100
+#define IEEE802154_HW_AFILT 0x00000020
/* Indicates that transceiver will support promiscuous mode setting. */
-#define IEEE802154_HW_PROMISCUOUS 0x00000200
+#define IEEE802154_HW_PROMISCUOUS 0x00000040
/* Indicates that receiver omits FCS. */
-#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400
+#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000080
/* Indicates that receiver will not filter frames with bad checksum. */
-#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800
+#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000100
/* Indicates that receiver omits FCS and transmitter will add FCS on its own. */
#define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \
IEEE802154_HW_RX_OMIT_CKSUM)
-/* This groups the most common CSMA support fields into one. */
-#define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \
- IEEE802154_HW_CCA_ED_LEVEL | \
- IEEE802154_HW_CSMA_PARAMS)
-
-/* This groups the most common ARET support fields into one. */
-#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \
- IEEE802154_HW_FRAME_RETRIES)
-
/* struct ieee802154_ops - callbacks from mac802154 to the driver
*
* This structure contains various callbacks that the driver may
@@ -171,7 +156,7 @@ struct ieee802154_hw {
* Returns either zero, or negative errno.
*
* set_txpower:
- * Set radio transmit power in dB. Called with pib_lock held.
+ * Set radio transmit power in mBm. Called with pib_lock held.
* Returns either zero, or negative errno.
*
* set_lbt
@@ -184,7 +169,7 @@ struct ieee802154_hw {
* Returns either zero, or negative errno.
*
* set_cca_ed_level
- * Sets the CCA energy detection threshold in dBm. Called with pib_lock
+ * Sets the CCA energy detection threshold in mBm. Called with pib_lock
* held.
* Returns either zero, or negative errno.
*
@@ -213,12 +198,11 @@ struct ieee802154_ops {
int (*set_hw_addr_filt)(struct ieee802154_hw *hw,
struct ieee802154_hw_addr_filt *filt,
unsigned long changed);
- int (*set_txpower)(struct ieee802154_hw *hw, s8 dbm);
+ int (*set_txpower)(struct ieee802154_hw *hw, s32 mbm);
int (*set_lbt)(struct ieee802154_hw *hw, bool on);
int (*set_cca_mode)(struct ieee802154_hw *hw,
const struct wpan_phy_cca *cca);
- int (*set_cca_ed_level)(struct ieee802154_hw *hw,
- s32 level);
+ int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm);
int (*set_csma_params)(struct ieee802154_hw *hw,
u8 min_be, u8 max_be, u8 retries);
int (*set_frame_retries)(struct ieee802154_hw *hw,
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index f733656404de..72eb23723294 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -58,6 +58,7 @@ struct net {
struct list_head exit_list; /* Use only net_mutex */
struct user_namespace *user_ns; /* Owning user namespace */
+ spinlock_t nsid_lock;
struct idr netns_ids;
struct ns_common ns;
@@ -271,7 +272,9 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
#define __net_initconst __initconst
#endif
+int peernet2id_alloc(struct net *net, struct net *peer);
int peernet2id(struct net *net, struct net *peer);
+bool peernet_has_id(struct net *net, struct net *peer);
struct net *get_net_ns_by_id(struct net *net, int id);
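/* A minimal sketch, not from this patch, of the intended split:
 * peernet2id_alloc() assigns an id under the new nsid_lock when the
 * peer has none yet, while peernet2id()/peernet_has_id() only look
 * one up, so read-only paths never allocate. EX_ATTR_NSID is a
 * made-up netlink attribute.
 */
static int ex_fill_peer_nsid(struct sk_buff *skb, struct net *net,
			     struct net *peer)
{
	if (!peernet_has_id(net, peer))
		return 0;	/* read-only path: do not allocate an id */
	return nla_put_s32(skb, EX_ATTR_NSID, peernet2id(net, peer));
}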
struct pernet_operations {
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index e6bcf55dcf20..3d6f48ca40a7 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -819,6 +819,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt,
* @use: number of chain references to this table
* @flags: table flag (see enum nft_table_flags)
* @name: name of the table
+ * @dev: this table is bound to this device (if any)
*/
struct nft_table {
struct list_head list;
@@ -828,6 +829,11 @@ struct nft_table {
u32 use;
u16 flags;
char name[NFT_TABLE_MAXNAMELEN];
+ struct net_device *dev;
+};
+
+enum nft_af_flags {
+ NFT_AF_NEEDS_DEV = (1 << 0),
};
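/* A sketch under stated assumptions (not the patch's actual code) of
 * how NFT_AF_NEEDS_DEV and the new NFTA_TABLE_DEV attribute fit
 * together when a table is created; ex_table_check_dev() is made up.
 */
static int ex_table_check_dev(struct net *net,
			      const struct nft_af_info *afi,
			      const struct nlattr * const nla[],
			      struct nft_table *table)
{
	if (!(afi->flags & NFT_AF_NEEDS_DEV))
		return 0;		/* e.g. the ip/ip6/inet families */
	if (!nla[NFTA_TABLE_DEV])
		return -EOPNOTSUPP;	/* the netdev family needs a device */
	table->dev = dev_get_by_name(net, nla_data(nla[NFTA_TABLE_DEV]));
	return table->dev ? 0 : -ENOENT;
}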
/**
@@ -838,6 +844,7 @@ struct nft_table {
* @nhooks: number of hooks in this family
* @owner: module owner
* @tables: used internally
+ * @flags: family flags
* @nops: number of hook ops in this family
* @hook_ops_init: initialization function for chain hook ops
* @hooks: hookfn overrides for packet validation
@@ -848,6 +855,7 @@ struct nft_af_info {
unsigned int nhooks;
struct module *owner;
struct list_head tables;
+ u32 flags;
unsigned int nops;
void (*hook_ops_init)(struct nf_hook_ops *,
unsigned int);
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 614a49be68a9..c68926b4899c 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -19,6 +19,7 @@ struct sock;
struct local_ports {
seqlock_t lock;
int range[2];
+ bool warned;
};
struct ping_group_range {
@@ -77,6 +78,8 @@ struct netns_ipv4 {
struct local_ports ip_local_ports;
int sysctl_tcp_ecn;
+ int sysctl_tcp_ecn_fallback;
+
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
int sysctl_ip_nonlocal_bind;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index d2527bf81142..8d93544a2d2b 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -34,6 +34,7 @@ struct netns_sysctl_ipv6 {
int fwmark_reflect;
int idgen_retries;
int idgen_delay;
+ int flowlabel_state_ranges;
};
struct netns_ipv6 {
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index eee608b12cc9..c80781146019 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,6 +13,7 @@ struct netns_nftables {
struct nft_af_info *inet;
struct nft_af_info *arp;
struct nft_af_info *bridge;
+ struct nft_af_info *netdev;
unsigned int base_seq;
u8 gencursor;
};
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index f8b5bc997959..0badebd1de7f 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -100,6 +100,8 @@ enum nl802154_attrs {
NL802154_ATTR_EXTENDED_ADDR,
+ NL802154_ATTR_WPAN_PHY_CAPS,
+
/* add attributes here, update the policy in nl802154.c */
__NL802154_ATTR_AFTER_LAST,
@@ -120,6 +122,61 @@ enum nl802154_iftype {
};
/**
+ * enum nl802154_wpan_phy_capability_attr - wpan phy capability attributes
+ *
+ * @__NL802154_CAP_ATTR_INVALID: attribute number 0 is reserved
+ * @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr
+ * @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for
+ * nl802154_wpan_phy_tx_power
+ * @NL802154_CAP_ATTR_CCA_ED_LEVELS: a nested attribute for supported cca_ed_levels
+ * @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags
+ * @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags
+ * @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value
+ * @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value
+ * @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value
+ * @NL802154_CAP_ATTR_MAX_MAXBE: maximum of maxbe value
+ * @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value
+ * @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value
+ * @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value
+ * @NL802154_CAP_ATTR_MAX_FRAME_RETRIES: maximum of frame retries value
+ * @NL802154_CAP_ATTR_IFTYPES: nl802154_iftype flags
+ * @NL802154_CAP_ATTR_LBT: nl802154_supported_bool_states flags
+ * @NL802154_CAP_ATTR_MAX: highest cap attribute currently defined
+ * @__NL802154_CAP_ATTR_AFTER_LAST: internal use
+ */
+enum nl802154_wpan_phy_capability_attr {
+ __NL802154_CAP_ATTR_INVALID,
+
+ NL802154_CAP_ATTR_IFTYPES,
+
+ NL802154_CAP_ATTR_CHANNELS,
+ NL802154_CAP_ATTR_TX_POWERS,
+
+ NL802154_CAP_ATTR_CCA_ED_LEVELS,
+ NL802154_CAP_ATTR_CCA_MODES,
+ NL802154_CAP_ATTR_CCA_OPTS,
+
+ NL802154_CAP_ATTR_MIN_MINBE,
+ NL802154_CAP_ATTR_MAX_MINBE,
+
+ NL802154_CAP_ATTR_MIN_MAXBE,
+ NL802154_CAP_ATTR_MAX_MAXBE,
+
+ NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+ NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+
+ NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+ NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+
+ NL802154_CAP_ATTR_LBT,
+
+ /* keep last */
+ __NL802154_CAP_ATTR_AFTER_LAST,
+ NL802154_CAP_ATTR_MAX = __NL802154_CAP_ATTR_AFTER_LAST - 1
+};
+
+/**
* enum nl802154_cca_modes - cca modes
*
* @__NL802154_CCA_INVALID: cca mode number 0 is reserved
@@ -162,4 +219,26 @@ enum nl802154_cca_opts {
NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1
};
+/**
+ * enum nl802154_supported_bool_states - bool states for bool capability entry
+ *
+ * @NL802154_SUPPORTED_BOOL_FALSE: indicates that false can be set
+ * @NL802154_SUPPORTED_BOOL_TRUE: indicates that true can be set
+ * @__NL802154_SUPPORTED_BOOL_INVALD: reserved
+ * @NL802154_SUPPORTED_BOOL_BOTH: indicates that both true and false can be set
+ * @__NL802154_SUPPORTED_BOOL_AFTER_LAST: Internal
+ * @NL802154_SUPPORTED_BOOL_MAX: highest value for bool states
+ */
+enum nl802154_supported_bool_states {
+ NL802154_SUPPORTED_BOOL_FALSE,
+ NL802154_SUPPORTED_BOOL_TRUE,
+ /* to handle them in a mask */
+ __NL802154_SUPPORTED_BOOL_INVALD,
+ NL802154_SUPPORTED_BOOL_BOTH,
+
+ /* keep last */
+ __NL802154_SUPPORTED_BOOL_AFTER_LAST,
+ NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
+};
+
#endif /* __NL802154_H */
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 9f4265ce8892..87935cad2f7b 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -64,6 +64,7 @@ struct request_sock {
struct timer_list rsk_timer;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
+ u32 *saved_syn;
u32 secid;
u32 peer_secid;
};
@@ -77,7 +78,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
req->rsk_ops = ops;
sock_hold(sk_listener);
req->rsk_listener = sk_listener;
-
+ req->saved_syn = NULL;
/* Following is temporary. It is coupled with debugging
* helpers in reqsk_put() & reqsk_free()
*/
@@ -104,6 +105,7 @@ static inline void reqsk_free(struct request_sock *req)
req->rsk_ops->destructor(req);
if (req->rsk_listener)
sock_put(req->rsk_listener);
+ kfree(req->saved_syn);
kmem_cache_free(req->rsk_ops->slab, req);
}
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6d778efcfdfd..2738f6f87908 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -501,12 +501,6 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return sch->enqueue(skb, sch);
}
-static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
-{
- qdisc_skb_cb(skb)->pkt_len = skb->len;
- return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
-}
-
static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
return q->flags & TCQ_F_CPUSTATS;
@@ -745,23 +739,6 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
return rtab->data[slot];
}
-#ifdef CONFIG_NET_CLS_ACT
-static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
- int action)
-{
- struct sk_buff *n;
-
- n = skb_clone(skb, gfp_mask);
-
- if (n) {
- n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
- n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
- n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
- }
- return n;
-}
-#endif
-
struct psched_ratecfg {
u64 rate_bytes_ps; /* bytes per second */
u32 mult;
diff --git a/include/net/sock.h b/include/net/sock.h
index 3a4898ec8c67..26c1c3171e00 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -184,6 +184,7 @@ struct sock_common {
unsigned char skc_reuse:4;
unsigned char skc_reuseport:1;
unsigned char skc_ipv6only:1;
+ unsigned char skc_net_refcnt:1;
int skc_bound_dev_if;
union {
struct hlist_node skc_bind_node;
@@ -323,6 +324,7 @@ struct sock {
#define sk_reuse __sk_common.skc_reuse
#define sk_reuseport __sk_common.skc_reuseport
#define sk_ipv6only __sk_common.skc_ipv6only
+#define sk_net_refcnt __sk_common.skc_net_refcnt
#define sk_bound_dev_if __sk_common.skc_bound_dev_if
#define sk_bind_node __sk_common.skc_bind_node
#define sk_prot __sk_common.skc_prot
@@ -1366,7 +1368,7 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
* Functions for memory accounting
*/
int __sk_mem_schedule(struct sock *sk, int size, int kind);
-void __sk_mem_reclaim(struct sock *sk);
+void __sk_mem_reclaim(struct sock *sk, int amount);
#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1407,7 +1409,7 @@ static inline void sk_mem_reclaim(struct sock *sk)
if (!sk_has_account(sk))
return;
if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk);
+ __sk_mem_reclaim(sk, sk->sk_forward_alloc);
}
static inline void sk_mem_reclaim_partial(struct sock *sk)
@@ -1415,7 +1417,7 @@ static inline void sk_mem_reclaim_partial(struct sock *sk)
if (!sk_has_account(sk))
return;
if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk);
+ __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
}
static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1514,9 +1516,8 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
- struct proto *prot);
+ struct proto *prot, int kern);
void sk_free(struct sock *sk);
-void sk_release_kernel(struct sock *sk);
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
@@ -2024,7 +2025,8 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
}
}
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
+struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule);
/**
* sk_page_frag - return an appropriate page_frag
@@ -2192,22 +2194,6 @@ void sock_net_set(struct sock *sk, struct net *net)
write_pnet(&sk->sk_net, net);
}
-/*
- * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace.
- * They should not hold a reference to a namespace in order to allow
- * to stop it.
- * Sockets after sk_change_net should be released using sk_release_kernel
- */
-static inline void sk_change_net(struct sock *sk, struct net *net)
-{
- struct net *current_net = sock_net(sk);
-
- if (!net_eq(current_net, net)) {
- put_net(current_net);
- sock_net_set(sk, net);
- }
-}
-
static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
if (skb->sk) {
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d2e69ee3019a..437f8fe75705 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -14,154 +14,261 @@
#include <linux/netdevice.h>
#include <linux/notifier.h>
+#define SWITCHDEV_F_NO_RECURSE BIT(0)
+
+enum switchdev_trans {
+ SWITCHDEV_TRANS_NONE,
+ SWITCHDEV_TRANS_PREPARE,
+ SWITCHDEV_TRANS_ABORT,
+ SWITCHDEV_TRANS_COMMIT,
+};
+
+enum switchdev_attr_id {
+ SWITCHDEV_ATTR_UNDEFINED,
+ SWITCHDEV_ATTR_PORT_PARENT_ID,
+ SWITCHDEV_ATTR_PORT_STP_STATE,
+ SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+};
+
+struct switchdev_attr {
+ enum switchdev_attr_id id;
+ enum switchdev_trans trans;
+ u32 flags;
+ union {
+ struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
+ u8 stp_state; /* PORT_STP_STATE */
+ unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */
+ } u;
+};
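/* A hedged sketch of the two-phase model a driver's attr_set might
 * implement; the ex_* helpers are hypothetical. PREPARE must fail if
 * COMMIT could fail, and COMMIT is then not allowed to fail.
 */
static int ex_port_attr_set(struct net_device *dev,
			    struct switchdev_attr *attr)
{
	switch (attr->trans) {
	case SWITCHDEV_TRANS_PREPARE:
		return ex_reserve_resources(dev, attr);
	case SWITCHDEV_TRANS_ABORT:
		ex_release_resources(dev, attr);
		return 0;
	case SWITCHDEV_TRANS_COMMIT:
		return ex_apply_attr(dev, attr);	/* must succeed */
	default:
		return -EOPNOTSUPP;
	}
}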
+
struct fib_info;
+enum switchdev_obj_id {
+ SWITCHDEV_OBJ_UNDEFINED,
+ SWITCHDEV_OBJ_PORT_VLAN,
+ SWITCHDEV_OBJ_IPV4_FIB,
+ SWITCHDEV_OBJ_PORT_FDB,
+};
+
+struct switchdev_obj {
+ enum switchdev_obj_id id;
+ enum switchdev_trans trans;
+ int (*cb)(struct net_device *dev, struct switchdev_obj *obj);
+ union {
+ struct switchdev_obj_vlan { /* PORT_VLAN */
+ u16 flags;
+ u16 vid_start;
+ u16 vid_end;
+ } vlan;
+ struct switchdev_obj_ipv4_fib { /* IPV4_FIB */
+ u32 dst;
+ int dst_len;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+ u32 nlflags;
+ u32 tb_id;
+ } ipv4_fib;
+ struct switchdev_obj_fdb { /* PORT_FDB */
+ const unsigned char *addr;
+ u16 vid;
+ } fdb;
+ } u;
+};
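/* A minimal caller-side sketch, assuming the switchdev core drives
 * the PREPARE/COMMIT phases internally: add VLANs 100-110 to a port
 * as one ranged object. ex_add_vlan_range() is made up.
 */
static int ex_add_vlan_range(struct net_device *dev)
{
	struct switchdev_obj obj = {
		.id = SWITCHDEV_OBJ_PORT_VLAN,
		.u.vlan = {
			.vid_start = 100,
			.vid_end = 110,
		},
	};

	return switchdev_port_obj_add(dev, &obj);
}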
+
/**
* struct switchdev_ops - switchdev operations
*
- * @swdev_parent_id_get: Called to get an ID of the switch chip this port
- * is part of. If driver implements this, it indicates that it
- * represents a port of a switch chip.
+ * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
+ *
+ * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
*
- * @swdev_port_stp_update: Called to notify switch device port of bridge
- * port STP state change.
+ * @switchdev_port_obj_add: Add an object to port (see switchdev_obj).
*
- * @swdev_fib_ipv4_add: Called to add/modify IPv4 route to switch device.
+ * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj).
*
- * @swdev_fib_ipv4_del: Called to delete IPv4 route from switch device.
+ * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj).
*/
-struct swdev_ops {
- int (*swdev_parent_id_get)(struct net_device *dev,
- struct netdev_phys_item_id *psid);
- int (*swdev_port_stp_update)(struct net_device *dev, u8 state);
- int (*swdev_fib_ipv4_add)(struct net_device *dev, __be32 dst,
- int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 nlflags,
- u32 tb_id);
- int (*swdev_fib_ipv4_del)(struct net_device *dev, __be32 dst,
- int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id);
+struct switchdev_ops {
+ int (*switchdev_port_attr_get)(struct net_device *dev,
+ struct switchdev_attr *attr);
+ int (*switchdev_port_attr_set)(struct net_device *dev,
+ struct switchdev_attr *attr);
+ int (*switchdev_port_obj_add)(struct net_device *dev,
+ struct switchdev_obj *obj);
+ int (*switchdev_port_obj_del)(struct net_device *dev,
+ struct switchdev_obj *obj);
+ int (*switchdev_port_obj_dump)(struct net_device *dev,
+ struct switchdev_obj *obj);
};
-enum netdev_switch_notifier_type {
- NETDEV_SWITCH_FDB_ADD = 1,
- NETDEV_SWITCH_FDB_DEL,
+enum switchdev_notifier_type {
+ SWITCHDEV_FDB_ADD = 1,
+ SWITCHDEV_FDB_DEL,
};
-struct netdev_switch_notifier_info {
+struct switchdev_notifier_info {
struct net_device *dev;
};
-struct netdev_switch_notifier_fdb_info {
- struct netdev_switch_notifier_info info; /* must be first */
+struct switchdev_notifier_fdb_info {
+ struct switchdev_notifier_info info; /* must be first */
const unsigned char *addr;
u16 vid;
};
static inline struct net_device *
-netdev_switch_notifier_info_to_dev(const struct netdev_switch_notifier_info *info)
+switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
{
return info->dev;
}
#ifdef CONFIG_NET_SWITCHDEV
-int netdev_switch_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid);
-int netdev_switch_port_stp_update(struct net_device *dev, u8 state);
-int register_netdev_switch_notifier(struct notifier_block *nb);
-int unregister_netdev_switch_notifier(struct notifier_block *nb);
-int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
- struct netdev_switch_notifier_info *info);
-int netdev_switch_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags);
-int netdev_switch_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags);
-int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags);
-int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags);
-int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 nlflags, u32 tb_id);
-int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id);
-void netdev_switch_fib_ipv4_abort(struct fib_info *fi);
+int switchdev_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr);
+int switchdev_port_attr_set(struct net_device *dev,
+ struct switchdev_attr *attr);
+int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj);
+int register_switchdev_notifier(struct notifier_block *nb);
+int unregister_switchdev_notifier(struct notifier_block *nb);
+int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+ struct switchdev_notifier_info *info);
+int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags);
+int switchdev_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags);
+int switchdev_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags);
+int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 nlflags, u32 tb_id);
+int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id);
+void switchdev_fib_ipv4_abort(struct fib_info *fi);
+int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid, u16 nlm_flags);
+int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid);
+int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev, int idx);
#else
-static inline int netdev_switch_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid)
+static inline int switchdev_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_attr_set(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_add(struct net_device *dev,
+ struct switchdev_obj *obj)
{
return -EOPNOTSUPP;
}
-static inline int netdev_switch_port_stp_update(struct net_device *dev,
- u8 state)
+static inline int switchdev_port_obj_del(struct net_device *dev,
+ struct switchdev_obj *obj)
{
return -EOPNOTSUPP;
}
-static inline int register_netdev_switch_notifier(struct notifier_block *nb)
+static inline int switchdev_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int register_switchdev_notifier(struct notifier_block *nb)
{
return 0;
}
-static inline int unregister_netdev_switch_notifier(struct notifier_block *nb)
+static inline int unregister_switchdev_notifier(struct notifier_block *nb)
{
return 0;
}
-static inline int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
- struct netdev_switch_notifier_info *info)
+static inline int call_switchdev_notifiers(unsigned long val,
+ struct net_device *dev,
+ struct switchdev_notifier_info *info)
{
return NOTIFY_DONE;
}
-static inline int netdev_switch_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh,
- u16 flags)
+static inline int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid,
+ u32 seq, struct net_device *dev,
+ u32 filter_mask, int nlflags)
{
return -EOPNOTSUPP;
}
-static inline int netdev_switch_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh,
- u16 flags)
+static inline int switchdev_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags)
{
return -EOPNOTSUPP;
}
-static inline int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh,
- u16 flags)
+static inline int switchdev_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags)
{
- return 0;
+ return -EOPNOTSUPP;
}
-static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh,
- u16 flags)
+static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
+ struct fib_info *fi,
+ u8 tos, u8 type,
+ u32 nlflags, u32 tb_id)
{
return 0;
}
-static inline int netdev_switch_fib_ipv4_add(u32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type,
- u32 nlflags, u32 tb_id)
+static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
+ struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id)
{
return 0;
}
-static inline int netdev_switch_fib_ipv4_del(u32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id)
+static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
- return 0;
}
-static inline void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u16 nlm_flags)
{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev,
+ int idx)
+{
+ return -EOPNOTSUPP;
}
#endif
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6d204f3f9df8..950cfecaad3c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -286,6 +286,14 @@ extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;
+/* optimized version of sk_under_memory_pressure() for TCP sockets */
+static inline bool tcp_under_memory_pressure(const struct sock *sk)
+{
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+ return !!sk->sk_cgrp->memory_pressure;
+
+ return tcp_memory_pressure;
+}
/*
* The next routines deal with comparing 32 bit unsigned ints
* and worry about wraparound (automatic with unsigned arithmetic).
@@ -311,6 +319,8 @@ static inline bool tcp_out_of_memory(struct sock *sk)
return false;
}
+void sk_forced_mem_schedule(struct sock *sk, int size);
+
static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
struct percpu_counter *ocp = sk->sk_prot->orphan_count;
@@ -326,18 +336,6 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
bool tcp_check_oom(struct sock *sk, int shift);
-/* syncookies: remember time of last synqueue overflow */
-static inline void tcp_synq_overflow(struct sock *sk)
-{
- tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
-}
-
-/* syncookies: no recent synqueue overflow on this listening socket? */
-static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
-{
- unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
- return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
-}
extern struct proto tcp_prot;
@@ -471,6 +469,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
+struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
@@ -483,13 +484,35 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
* i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
* the counter advances immediately after a cookie is generated).
*/
-#define MAX_SYNCOOKIE_AGE 2
+#define MAX_SYNCOOKIE_AGE 2
+#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
+#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
+
+/* syncookies: remember time of last synqueue overflow
+ * But do not dirty this field too often (once per second is enough)
+ */
+static inline void tcp_synq_overflow(struct sock *sk)
+{
+ unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ unsigned long now = jiffies;
+
+ if (time_after(now, last_overflow + HZ))
+ tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
+}
+
+/* syncookies: no recent synqueue overflow on this listening socket? */
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
+{
+ unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+
+ return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+}
static inline u32 tcp_cookie_time(void)
{
u64 val = get_jiffies_64();
- do_div(val, 60 * HZ);
+ do_div(val, TCP_SYNCOOKIE_PERIOD);
return val;
}
@@ -527,7 +550,7 @@ int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
-int tcp_write_wakeup(struct sock *);
+int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
@@ -692,6 +715,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80
+#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
+
/* This is what the send packet queuing engine uses to pass
* TCP per-packet control information to the transmission code.
* We also store the host-order sequence numbers in here too.
@@ -705,11 +730,14 @@ struct tcp_skb_cb {
/* Note : tcp_tw_isn is used in input path only
* (isn chosen by tcp_timewait_state_process())
*
- * tcp_gso_segs is used in write queue only,
- * cf tcp_skb_pcount()
+ * tcp_gso_segs/size are used in write queue only,
+ * cf tcp_skb_pcount()/tcp_skb_mss()
*/
__u32 tcp_tw_isn;
- __u32 tcp_gso_segs;
+ struct {
+ u16 tcp_gso_segs;
+ u16 tcp_gso_size;
+ };
};
__u8 tcp_flags; /* TCP header flags. (tcp[13]) */
@@ -765,10 +793,10 @@ static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}
-/* This is valid iff tcp_skb_pcount() > 1. */
+/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
- return skb_shinfo(skb)->gso_size;
+ return TCP_SKB_CB(skb)->tcp_gso_size;
}
/* Events passed to congestion control interface */
@@ -1043,14 +1071,31 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
return tp->is_cwnd_limited;
}
-static inline void tcp_check_probe_timer(struct sock *sk)
+/* Something is really bad, we could not queue an additional packet,
+ * because qdisc is full or receiver sent a 0 window.
+ * We do not want to add fuel to the fire, or abort too early,
+ * so make sure the timer we arm now is at least 200ms in the future,
+ * regardless of current icsk_rto value (as it could be ~2ms)
+ */
+static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
- const struct tcp_sock *tp = tcp_sk(sk);
- const struct inet_connection_sock *icsk = inet_csk(sk);
+ return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
+}
+
+/* Variant of inet_csk_rto_backoff() used for zero window probes */
+static inline unsigned long tcp_probe0_when(const struct sock *sk,
+ unsigned long max_when)
+{
+ u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
- if (!tp->packets_out && !icsk->icsk_pending)
+ return (unsigned long)min_t(u64, when, max_when);
+}
+
+static inline void tcp_check_probe_timer(struct sock *sk)
+{
+ if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- icsk->icsk_rto, TCP_RTO_MAX);
+ tcp_probe0_base(sk), TCP_RTO_MAX);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index a9ebdf5701e8..602f05b7a275 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -113,6 +113,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_UNSPEC,
BPF_MAP_TYPE_HASH,
BPF_MAP_TYPE_ARRAY,
+ BPF_MAP_TYPE_PROG_ARRAY,
};
enum bpf_prog_type {
@@ -210,6 +211,25 @@ enum bpf_func_id {
* Return: 0 on success
*/
BPF_FUNC_l4_csum_replace,
+
+ /**
+ * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
+ * @ctx: context pointer passed to next program
+ * @prog_array_map: pointer to a map of type BPF_MAP_TYPE_PROG_ARRAY
+ * @index: index into the array that selects the specific program to run
+ * Return: 0 on success
+ */
+ BPF_FUNC_tail_call,
+
+ /**
+ * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev
+ * @skb: pointer to skb
+ * @ifindex: ifindex of the net device
+ * @flags: bit 0 - if set, redirect to ingress instead of egress
+ * other bits - reserved
+ * Return: 0 on success
+ */
+ BPF_FUNC_clone_redirect,
__BPF_FUNC_MAX_ID,
};
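/* A hedged sketch in the restricted C used by samples/bpf; the SEC()
 * and struct bpf_map_def conventions are assumptions from that
 * environment, not part of this header. User space installs bpf_prog
 * fds into the slots via the regular map-update syscall.
 */
struct bpf_map_def SEC("maps") ex_jmp_table = {
	.type = BPF_MAP_TYPE_PROG_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u32),	/* a bpf_prog fd from user space */
	.max_entries = 8,
};

SEC("socket/ex_demux")
int ex_demux(struct __sk_buff *skb)
{
	u32 slot = skb->vlan_tci & 7;	/* toy demux key */

	/* Jump to the program installed for this slot; on an empty
	 * slot, or after too many chained calls (MAX_TAIL_CALL_CNT),
	 * execution falls through to here.
	 */
	bpf_tail_call(skb, &ex_jmp_table, slot);
	return 0;	/* default action when no program is installed */
}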
@@ -226,6 +246,10 @@ struct __sk_buff {
__u32 vlan_tci;
__u32 vlan_proto;
__u32 priority;
+ __u32 ingress_ifindex;
+ __u32 ifindex;
+ __u32 tc_index;
+ __u32 cb[5];
};
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 41892f720057..9692cda5f8fc 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -95,11 +95,17 @@ typedef __u32 can_err_mask_t;
* @can_dlc: frame payload length in byte (0 .. 8) aka data length code
* N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1
* mapping of the 'data length code' to the real payload length
+ * @__pad: padding
+ * @__res0: reserved / padding
+ * @__res1: reserved / padding
* @data: CAN frame payload (up to 8 byte)
*/
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */
+ __u8 __pad; /* padding */
+ __u8 __res0; /* reserved / padding */
+ __u8 __res1; /* reserved / padding */
__u8 data[CAN_MAX_DLEN] __attribute__((aligned(8)));
};
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index 3e6184cf2f6d..5079b9d57e31 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -78,6 +78,7 @@ enum {
CGW_FILTER, /* specify struct can_filter on source CAN device */
CGW_DELETED, /* number of deleted CAN frames (see max_hops param) */
CGW_LIM_HOPS, /* limit the number of hops of this specific rule */
+ CGW_MOD_UID, /* user defined identifier for modification updates */
__CGW_MAX
};
@@ -162,6 +163,10 @@ enum {
* load time of the can-gw module). This value is used to reduce the number of
* possible hops for this gateway rule to a value smaller than max_hops.
*
+ * CGW_MOD_UID (length 4 bytes):
+ * Optional non-zero user defined routing job identifier to alter existing
+ * modification settings at runtime.
+ *
* CGW_CS_XOR (length 4 bytes):
* Set a simple XOR checksum starting with an initial value into
* data[result-idx] using data[start-idx] .. data[end-idx]
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 2e49fc880d29..cd67aec187d9 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -215,6 +215,11 @@ enum tunable_id {
ETHTOOL_ID_UNSPEC,
ETHTOOL_RX_COPYBREAK,
ETHTOOL_TX_COPYBREAK,
+ /*
+ * Add your fresh new tunable attribute above and remember to update
+ * tunable_strings[] in net/core/ethtool.c
+ */
+ __ETHTOOL_TUNABLE_COUNT,
};
enum tunable_type_id {
@@ -545,6 +550,7 @@ enum ethtool_stringset {
ETH_SS_NTUPLE_FILTERS,
ETH_SS_FEATURES,
ETH_SS_RSS_HASH_FUNCS,
+ ETH_SS_TUNABLES,
};
/**
@@ -796,6 +802,31 @@ struct ethtool_rx_flow_spec {
__u32 location;
};
+/* How rings are laid out when accessing virtual functions or
+ * offloaded queues is device specific. To allow users to do flow
+ * steering and specify these queues, the ring cookie is partitioned
+ * into a 32-bit queue index and an 8-bit virtual function id,
+ * which leaves three bytes for further specifiers. Future devices
+ * may support more than 256 virtual functions if they start
+ * supporting PCIe w/ARI, but no such devices are known today, so
+ * no space is reserved for that here. If a future patch consumes
+ * the next byte it should be aware of this possibility.
+ */
+#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+ return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+}
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+ return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+}
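/* A hypothetical inverse helper (not in this patch) for building the
 * cookie of an ntuple rule that steers to VF 2, queue 5:
 * ex_make_flow_spec_ring(2, 5) == 0x0000000200000005ULL.
 */
static inline __u64 ex_make_flow_spec_ring(__u64 vf, __u64 queue)
{
	return (vf << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) |
	       (queue & ETHTOOL_RX_FLOW_SPEC_RING);
}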
+
/**
* struct ethtool_rxnfc - command to get or set RX flow classification rules
* @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH,
@@ -1264,15 +1295,19 @@ enum ethtool_sfeatures_retval_bits {
* it was forced up into this mode or autonegotiated.
*/
-/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */
+/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|5|10|20|25|40|50|56|100]GbE. */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define SPEED_2500 2500
+#define SPEED_5000 5000
#define SPEED_10000 10000
#define SPEED_20000 20000
+#define SPEED_25000 25000
#define SPEED_40000 40000
+#define SPEED_50000 50000
#define SPEED_56000 56000
+#define SPEED_100000 100000
#define SPEED_UNKNOWN -1
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index d9cd19214b98..1737b7a8272b 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -390,6 +390,17 @@ struct ifla_vxlan_port_range {
__be16 high;
};
+/* GENEVE section */
+enum {
+ IFLA_GENEVE_UNSPEC,
+ IFLA_GENEVE_ID,
+ IFLA_GENEVE_REMOTE,
+ IFLA_GENEVE_TTL,
+ IFLA_GENEVE_TOS,
+ __IFLA_GENEVE_MAX
+};
+#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
+
/* Bonding section */
enum {
@@ -417,6 +428,9 @@ enum {
IFLA_BOND_AD_LACP_RATE,
IFLA_BOND_AD_SELECT,
IFLA_BOND_AD_INFO,
+ IFLA_BOND_AD_ACTOR_SYS_PRIO,
+ IFLA_BOND_AD_USER_PORT_KEY,
+ IFLA_BOND_AD_ACTOR_SYSTEM,
__IFLA_BOND_MAX,
};
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index 053bd102fbe0..d3d715f8c88f 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -54,6 +54,7 @@ struct sockaddr_ll {
#define PACKET_FANOUT 18
#define PACKET_TX_HAS_OFF 19
#define PACKET_QDISC_BYPASS 20
+#define PACKET_ROLLOVER_STATS 21
#define PACKET_FANOUT_HASH 0
#define PACKET_FANOUT_LB 1
@@ -75,6 +76,12 @@ struct tpacket_stats_v3 {
unsigned int tp_freeze_q_cnt;
};
+struct tpacket_rollover_stats {
+ __aligned_u64 tp_all;
+ __aligned_u64 tp_huge;
+ __aligned_u64 tp_failed;
+};
+
union tpacket_stats_u {
struct tpacket_stats stats1;
struct tpacket_stats_v3 stats3;
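/* A hedged user-space sketch: the new counters are read with
 * getsockopt() on a packet socket whose fanout group uses rollover;
 * ex_print_rollover() is made up.
 */
#include <linux/if_packet.h>
#include <sys/socket.h>
#include <stdio.h>

static void ex_print_rollover(int fd)
{
	struct tpacket_rollover_stats st;
	socklen_t len = sizeof(st);

	if (getsockopt(fd, SOL_PACKET, PACKET_ROLLOVER_STATS, &st, &len) == 0)
		printf("all=%llu huge=%llu failed=%llu\n",
		       (unsigned long long)st.tp_all,
		       (unsigned long long)st.tp_huge,
		       (unsigned long long)st.tp_failed);
}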
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 589ced069e8a..83d6236a2f08 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -69,6 +69,8 @@ enum {
#define IPPROTO_SCTP IPPROTO_SCTP
IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
#define IPPROTO_UDPLITE IPPROTO_UDPLITE
+ IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */
+#define IPPROTO_MPLS IPPROTO_MPLS
IPPROTO_RAW = 255, /* Raw IP packets */
#define IPPROTO_RAW IPPROTO_RAW
IPPROTO_MAX
@@ -110,6 +112,7 @@ struct in_addr {
#define IP_MINTTL 21
#define IP_NODEFRAG 22
#define IP_CHECKSUM 23
+#define IP_BIND_ADDRESS_NO_PORT 24
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
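/* A minimal sketch, assuming the option's intended use: bind a source
 * address without consuming an ephemeral port, and let connect() pick
 * the port. ex_bound_connect() is made up; src->sin_port must be 0.
 */
static int ex_bound_connect(int fd, const struct sockaddr_in *src,
			    const struct sockaddr_in *dst)
{
	int one = 1;

	if (setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT,
		       &one, sizeof(one)) < 0)
		return -1;
	if (bind(fd, (const struct sockaddr *)src, sizeof(*src)) < 0)
		return -1;
	return connect(fd, (const struct sockaddr *)dst, sizeof(*dst));
}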
diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
index 2be7bd174751..f6598d1c886e 100644
--- a/include/uapi/linux/ipv6_route.h
+++ b/include/uapi/linux/ipv6_route.h
@@ -34,6 +34,7 @@
#define RTF_PREF(pref) ((pref) << 27)
#define RTF_PREF_MASK 0x18000000
+#define RTF_PCPU 0x40000000
#define RTF_LOCAL 0x80000000
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index ef1b1f88ca18..177027cce6b3 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -51,11 +51,17 @@ enum nf_inet_hooks {
NF_INET_NUMHOOKS
};
+enum nf_dev_hooks {
+ NF_NETDEV_INGRESS,
+ NF_NETDEV_NUMHOOKS
+};
+
enum {
NFPROTO_UNSPEC = 0,
NFPROTO_INET = 1,
NFPROTO_IPV4 = 2,
NFPROTO_ARP = 3,
+ NFPROTO_NETDEV = 5,
NFPROTO_BRIDGE = 7,
NFPROTO_IPV6 = 10,
NFPROTO_DECNET = 12,
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 5fa1cd04762e..89a671e0f5e7 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -146,12 +146,14 @@ enum nft_table_flags {
* @NFTA_TABLE_NAME: name of the table (NLA_STRING)
* @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
* @NFTA_TABLE_USE: number of chains in this table (NLA_U32)
+ * @NFTA_TABLE_DEV: net device name (NLA_STRING)
*/
enum nft_table_attributes {
NFTA_TABLE_UNSPEC,
NFTA_TABLE_NAME,
NFTA_TABLE_FLAGS,
NFTA_TABLE_USE,
+ NFTA_TABLE_DEV,
__NFTA_TABLE_MAX
};
#define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1)
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 1a85940f8ab7..3e34b7d702f8 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -108,6 +108,7 @@ struct nlmsgerr {
#define NETLINK_NO_ENOBUFS 5
#define NETLINK_RX_RING 6
#define NETLINK_TX_RING 7
+#define NETLINK_LISTEN_ALL_NSID 8
struct nl_pktinfo {
__u32 group;
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 241220c43e86..c0ab6b0a3919 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2620,16 +2620,17 @@ enum nl80211_band_attr {
* an indoor surroundings, i.e., it is connected to AC power (and not
* through portable DC inverters) or is under the control of a master
* that is acting as an AP and is connected to AC power.
- * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this
+ * @NL80211_FREQUENCY_ATTR_IR_CONCURRENT: IR operation is allowed on this
* channel if it's connected concurrently to a BSS on the same channel on
* the 2 GHz band or to a channel in the same UNII band (on the 5 GHz
- * band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a
- * channel that has the GO_CONCURRENT attribute set can be done when there
- * is a clear assessment that the device is operating under the guidance of
- * an authorized master, i.e., setting up a GO while the device is also
- * connected to an AP with DFS and radar detection on the UNII band (it is
- * up to user-space, i.e., wpa_supplicant to perform the required
- * verifications)
+ * band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO or TDLS
+ * off-channel on a channel that has the IR_CONCURRENT attribute set can be
+ * done when there is a clear assessment that the device is operating under
+ * the guidance of an authorized master, i.e., setting up a GO or TDLS
+ * off-channel while the device is also connected to an AP with DFS and
+ * radar detection on the UNII band (it is up to user-space, i.e.,
+ * wpa_supplicant to perform the required verifications). Using this
+ * attribute for IR is disallowed for master interfaces (IBSS, AP).
* @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed
* on this channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
@@ -2641,7 +2642,7 @@ enum nl80211_band_attr {
* See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122
* for more information on the FCC description of the relaxations allowed
* by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and
- * NL80211_FREQUENCY_ATTR_GO_CONCURRENT.
+ * NL80211_FREQUENCY_ATTR_IR_CONCURRENT.
*/
enum nl80211_frequency_attr {
__NL80211_FREQUENCY_ATTR_INVALID,
@@ -2659,7 +2660,7 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_NO_160MHZ,
NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
NL80211_FREQUENCY_ATTR_INDOOR_ONLY,
- NL80211_FREQUENCY_ATTR_GO_CONCURRENT,
+ NL80211_FREQUENCY_ATTR_IR_CONCURRENT,
NL80211_FREQUENCY_ATTR_NO_20MHZ,
NL80211_FREQUENCY_ATTR_NO_10MHZ,
@@ -2672,6 +2673,8 @@ enum nl80211_frequency_attr {
#define NL80211_FREQUENCY_ATTR_PASSIVE_SCAN NL80211_FREQUENCY_ATTR_NO_IR
#define NL80211_FREQUENCY_ATTR_NO_IBSS NL80211_FREQUENCY_ATTR_NO_IR
#define NL80211_FREQUENCY_ATTR_NO_IR NL80211_FREQUENCY_ATTR_NO_IR
+#define NL80211_FREQUENCY_ATTR_GO_CONCURRENT \
+ NL80211_FREQUENCY_ATTR_IR_CONCURRENT
/**
* enum nl80211_bitrate_attr - bitrate attributes
@@ -2830,7 +2833,7 @@ enum nl80211_sched_scan_match_attr {
* @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
* base on contiguous rules and wider channels will be allowed to cross
* multiple contiguous/overlapping frequency ranges.
- * @NL80211_RRF_GO_CONCURRENT: See &NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @NL80211_RRF_IR_CONCURRENT: See &NL80211_FREQUENCY_ATTR_IR_CONCURRENT
* @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation
* @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
* @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
@@ -2847,7 +2850,7 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_IR = 1<<7,
__NL80211_RRF_NO_IBSS = 1<<8,
NL80211_RRF_AUTO_BW = 1<<11,
- NL80211_RRF_GO_CONCURRENT = 1<<12,
+ NL80211_RRF_IR_CONCURRENT = 1<<12,
NL80211_RRF_NO_HT40MINUS = 1<<13,
NL80211_RRF_NO_HT40PLUS = 1<<14,
NL80211_RRF_NO_80MHZ = 1<<15,
@@ -2859,6 +2862,7 @@ enum nl80211_reg_rule_flags {
#define NL80211_RRF_NO_IR NL80211_RRF_NO_IR
#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\
NL80211_RRF_NO_HT40PLUS)
+#define NL80211_RRF_GO_CONCURRENT NL80211_RRF_IR_CONCURRENT
/* For backport compatibility with older userspace */
#define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS)
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index bbd49a0c46c7..1dab77601c21 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -153,6 +153,8 @@ enum ovs_packet_cmd {
* flow key against the kernel's.
* @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used
* for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes.
+ * Also used in an upcall when %OVS_ACTION_ATTR_USERSPACE has the optional
+ * %OVS_USERSPACE_ATTR_ACTIONS attribute.
* @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
* notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
* %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content
@@ -528,6 +530,7 @@ enum ovs_sample_attr {
* copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
* @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get
* tunnel info.
+ * @OVS_USERSPACE_ATTR_ACTIONS: If present, send actions with upcall.
*/
enum ovs_userspace_attr {
OVS_USERSPACE_ATTR_UNSPEC,
@@ -535,6 +538,7 @@ enum ovs_userspace_attr {
OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */
OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port
* to get tunnel info. */
+ OVS_USERSPACE_ATTR_ACTIONS, /* Optional flag to get actions. */
__OVS_USERSPACE_ATTR_MAX
};
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index bf08e76bf505..4f0d1bc3647d 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/pkt_sched.h>
+#ifdef __KERNEL__
/* I think i could have done better macros ; for now this is stolen from
* some arch/mips code - jhs
*/
@@ -35,20 +36,6 @@ bits 9,10,11: redirect counter - redirect TTL. Loop avoidance
*
* */
-#define TC_MUNGED _TC_MAKEMASK1(0)
-#define SET_TC_MUNGED(v) ( TC_MUNGED | (v & ~TC_MUNGED))
-#define CLR_TC_MUNGED(v) ( v & ~TC_MUNGED)
-
-#define TC_OK2MUNGE _TC_MAKEMASK1(1)
-#define SET_TC_OK2MUNGE(v) ( TC_OK2MUNGE | (v & ~TC_OK2MUNGE))
-#define CLR_TC_OK2MUNGE(v) ( v & ~TC_OK2MUNGE)
-
-#define S_TC_VERD _TC_MAKE32(2)
-#define M_TC_VERD _TC_MAKEMASK(4,S_TC_VERD)
-#define G_TC_VERD(x) _TC_GETVALUE(x,S_TC_VERD,M_TC_VERD)
-#define V_TC_VERD(x) _TC_MAKEVALUE(x,S_TC_VERD)
-#define SET_TC_VERD(v,n) ((V_TC_VERD(n)) | (v & ~M_TC_VERD))
-
#define S_TC_FROM _TC_MAKE32(6)
#define M_TC_FROM _TC_MAKEMASK(2,S_TC_FROM)
#define G_TC_FROM(x) _TC_GETVALUE(x,S_TC_FROM,M_TC_FROM)
@@ -62,18 +49,16 @@ bits 9,10,11: redirect counter - redirect TTL. Loop avoidance
#define SET_TC_NCLS(v) ( TC_NCLS | (v & ~TC_NCLS))
#define CLR_TC_NCLS(v) ( v & ~TC_NCLS)
-#define S_TC_RTTL _TC_MAKE32(9)
-#define M_TC_RTTL _TC_MAKEMASK(3,S_TC_RTTL)
-#define G_TC_RTTL(x) _TC_GETVALUE(x,S_TC_RTTL,M_TC_RTTL)
-#define V_TC_RTTL(x) _TC_MAKEVALUE(x,S_TC_RTTL)
-#define SET_TC_RTTL(v,n) ((V_TC_RTTL(n)) | (v & ~M_TC_RTTL))
-
#define S_TC_AT _TC_MAKE32(12)
#define M_TC_AT _TC_MAKEMASK(2,S_TC_AT)
#define G_TC_AT(x) _TC_GETVALUE(x,S_TC_AT,M_TC_AT)
#define V_TC_AT(x) _TC_MAKEVALUE(x,S_TC_AT)
#define SET_TC_AT(v,n) ((V_TC_AT(n)) | (v & ~M_TC_AT))
+#define MAX_REC_LOOP 4
+#define MAX_RED_LOOP 4
+#endif
+
/* Action attributes */
enum {
TCA_ACT_UNSPEC,
@@ -93,8 +78,6 @@ enum {
#define TCA_ACT_NOUNBIND 0
#define TCA_ACT_REPLACE 1
#define TCA_ACT_NOREPLACE 0
-#define MAX_REC_LOOP 4
-#define MAX_RED_LOOP 4
#define TC_ACT_UNSPEC (-1)
#define TC_ACT_OK 0
@@ -404,6 +387,36 @@ enum {
#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
+/* Flower classifier */
+
+enum {
+ TCA_FLOWER_UNSPEC,
+ TCA_FLOWER_CLASSID,
+ TCA_FLOWER_INDEV,
+ TCA_FLOWER_ACT,
+ TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
+ TCA_FLOWER_KEY_IP_PROTO, /* u8 */
+ TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_TCP_SRC, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST, /* be16 */
+ __TCA_FLOWER_MAX,
+};
+
+#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
+
/* Extended Matches */
struct tcf_ematch_tree_hdr {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 534b84710745..8d2530daca9f 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -268,7 +268,8 @@ enum {
TCA_GRED_STAB,
TCA_GRED_DPS,
TCA_GRED_MAX_P,
- __TCA_GRED_MAX,
+ TCA_GRED_LIMIT,
+ __TCA_GRED_MAX,
};
#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
@@ -679,6 +680,7 @@ enum {
TCA_CODEL_LIMIT,
TCA_CODEL_INTERVAL,
TCA_CODEL_ECN,
+ TCA_CODEL_CE_THRESHOLD,
__TCA_CODEL_MAX
};
@@ -695,6 +697,7 @@ struct tc_codel_xstats {
__u32 drop_overlimit; /* number of times the max qdisc packet limit was hit */
__u32 ecn_mark; /* number of packets we ECN marked instead of dropped */
__u32 dropping; /* are we in dropping state? */
+ __u32 ce_mark; /* number of CE marked packets because of ce_threshold */
};
/* FQ_CODEL */
@@ -707,6 +710,7 @@ enum {
TCA_FQ_CODEL_ECN,
TCA_FQ_CODEL_FLOWS,
TCA_FQ_CODEL_QUANTUM,
+ TCA_FQ_CODEL_CE_THRESHOLD,
__TCA_FQ_CODEL_MAX
};
@@ -730,6 +734,7 @@ struct tc_fq_codel_qd_stats {
*/
__u32 new_flows_len; /* count of flows in new list */
__u32 old_flows_len; /* count of flows in old list */
+ __u32 ce_mark; /* packets above ce_threshold */
};
struct tc_fq_codel_cl_stats {
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 91950950aa59..0f9265cb2a96 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -38,6 +38,8 @@
#define RDS_IB_ABI_VERSION 0x301
+#define SOL_RDS 276
+
/*
* setsockopt/getsockopt for SOL_RDS
*/
@@ -48,6 +50,14 @@
#define RDS_RECVERR 5
#define RDS_CONG_MONITOR 6
#define RDS_GET_MR_FOR_DEST 7
+#define SO_RDS_TRANSPORT 8
+
+/* supported values for SO_RDS_TRANSPORT */
+#define RDS_TRANS_IB 0
+#define RDS_TRANS_IWARP 1
+#define RDS_TRANS_TCP 2
+#define RDS_TRANS_COUNT 3
+#define RDS_TRANS_NONE (~0)
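/* A minimal sketch: pin an RDS socket to one transport before bind();
 * ex_rds_use_tcp() is made up.
 */
static int ex_rds_use_tcp(int fd)
{
	int trans = RDS_TRANS_TCP;

	return setsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT,
			  &trans, sizeof(trans));
}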
/*
* Control message types for SOL_RDS.
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 6a6fb747c78d..eee8968407f0 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -276,6 +276,8 @@ enum
LINUX_MIB_TCPACKSKIPPEDFINWAIT2, /* TCPACKSkippedFinWait2 */
LINUX_MIB_TCPACKSKIPPEDTIMEWAIT, /* TCPACKSkippedTimeWait */
LINUX_MIB_TCPACKSKIPPEDCHALLENGE, /* TCPACKSkippedChallenge */
+ LINUX_MIB_TCPWINPROBE, /* TCPWinProbe */
+ LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index faa72f4fa547..65a77b071e22 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -113,6 +113,8 @@ enum {
#define TCP_TIMESTAMP 24
#define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */
#define TCP_CC_INFO 26 /* Get Congestion Control (optional) info */
+#define TCP_SAVE_SYN 27 /* Record SYN headers for new connections */
+#define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */
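/* A hedged user-space sketch: TCP_SAVE_SYN is set on the listener,
 * TCP_SAVED_SYN is then read once per accepted socket (the kernel
 * drops its copy after a successful read). The ex_* names are made up.
 */
static int ex_enable_save_syn(int listen_fd)
{
	int one = 1;

	return setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN,
			  &one, sizeof(one));
}

static ssize_t ex_read_saved_syn(int conn_fd, char *buf, socklen_t len)
{
	if (getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, buf, &len) < 0)
		return -1;
	return len;	/* length of the saved network + TCP headers */
}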
struct tcp_repair_opt {
__u32 opt_code;
@@ -190,8 +192,10 @@ struct tcp_info {
__u64 tcpi_pacing_rate;
__u64 tcpi_max_pacing_rate;
- __u64 tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
+ __u64 tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
__u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */
+ __u32 tcpi_segs_out; /* RFC4898 tcpEStatsPerfSegsOut */
+ __u32 tcpi_segs_in; /* RFC4898 tcpEStatsPerfSegsIn */
};
/* for TCP_MD5SIG socket option */
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 8a6616583f38..cb31229a6fa4 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -14,12 +14,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
-
-struct bpf_array {
- struct bpf_map map;
- u32 elem_size;
- char value[0] __aligned(8);
-};
+#include <linux/filter.h>
/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
@@ -154,3 +149,109 @@ static int __init register_array_map(void)
return 0;
}
late_initcall(register_array_map);
+
+static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
+{
+ /* only bpf_prog file descriptors can be stored in prog_array map */
+ if (attr->value_size != sizeof(u32))
+ return ERR_PTR(-EINVAL);
+ return array_map_alloc(attr);
+}
+
+static void prog_array_map_free(struct bpf_map *map)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ int i;
+
+ synchronize_rcu();
+
+ /* make sure it's empty */
+ for (i = 0; i < array->map.max_entries; i++)
+ BUG_ON(array->prog[i] != NULL);
+ kvfree(array);
+}
+
+static void *prog_array_map_lookup_elem(struct bpf_map *map, void *key)
+{
+ return NULL;
+}
+
+/* only called from syscall */
+static int prog_array_map_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ struct bpf_prog *prog, *old_prog;
+ u32 index = *(u32 *)key, ufd;
+
+ if (map_flags != BPF_ANY)
+ return -EINVAL;
+
+ if (index >= array->map.max_entries)
+ return -E2BIG;
+
+ ufd = *(u32 *)value;
+ prog = bpf_prog_get(ufd);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ if (!bpf_prog_array_compatible(array, prog)) {
+ bpf_prog_put(prog);
+ return -EINVAL;
+ }
+
+ old_prog = xchg(array->prog + index, prog);
+ if (old_prog)
+ bpf_prog_put_rcu(old_prog);
+
+ return 0;
+}
+
+static int prog_array_map_delete_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ struct bpf_prog *old_prog;
+ u32 index = *(u32 *)key;
+
+ if (index >= array->map.max_entries)
+ return -E2BIG;
+
+ old_prog = xchg(array->prog + index, NULL);
+ if (old_prog) {
+ bpf_prog_put_rcu(old_prog);
+ return 0;
+ } else {
+ return -ENOENT;
+ }
+}
+
+/* decrement refcnt of all bpf_progs that are stored in this map */
+void bpf_prog_array_map_clear(struct bpf_map *map)
+{
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ int i;
+
+ for (i = 0; i < array->map.max_entries; i++)
+ prog_array_map_delete_elem(map, &i);
+}
+
+static const struct bpf_map_ops prog_array_ops = {
+ .map_alloc = prog_array_map_alloc,
+ .map_free = prog_array_map_free,
+ .map_get_next_key = array_map_get_next_key,
+ .map_lookup_elem = prog_array_map_lookup_elem,
+ .map_update_elem = prog_array_map_update_elem,
+ .map_delete_elem = prog_array_map_delete_elem,
+};
+
+static struct bpf_map_type_list prog_array_type __read_mostly = {
+ .ops = &prog_array_ops,
+ .type = BPF_MAP_TYPE_PROG_ARRAY,
+};
+
+static int __init register_prog_array_map(void)
+{
+ bpf_register_map_type(&prog_array_type);
+ return 0;
+}
+late_initcall(register_prog_array_map);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 54f0e7fcd0e2..1e00aa3316dc 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -26,9 +26,10 @@
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
-#include <asm/unaligned.h>
#include <linux/bpf.h>
+#include <asm/unaligned.h>
+
/* Registers */
#define BPF_R0 regs[BPF_REG_0]
#define BPF_R1 regs[BPF_REG_1]
@@ -62,6 +63,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
ptr = skb_network_header(skb) + k - SKF_NET_OFF;
else if (k >= SKF_LL_OFF)
ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
+
if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
return ptr;
@@ -244,6 +246,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
/* Call instruction */
[BPF_JMP | BPF_CALL] = &&JMP_CALL,
+ [BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
/* Jumps */
[BPF_JMP | BPF_JA] = &&JMP_JA,
[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
@@ -286,6 +289,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
};
+ u32 tail_call_cnt = 0;
void *ptr;
int off;
@@ -431,6 +435,30 @@ select_insn:
BPF_R4, BPF_R5);
CONT;
+ JMP_TAIL_CALL: {
+ struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ struct bpf_prog *prog;
+ u64 index = BPF_R3;
+
+ if (unlikely(index >= array->map.max_entries))
+ goto out;
+
+ if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
+ goto out;
+
+ tail_call_cnt++;
+
+ prog = READ_ONCE(array->prog[index]);
+ if (unlikely(!prog))
+ goto out;
+
+ ARG1 = BPF_R1;
+ insn = prog->insnsi;
+ goto select_insn;
+out:
+ CONT;
+ }
/* JMP */
JMP_JA:
insn += insn->off;
@@ -615,25 +643,63 @@ load_byte:
return 0;
}
-void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+bool bpf_prog_array_compatible(struct bpf_array *array,
+ const struct bpf_prog *fp)
{
+ if (!array->owner_prog_type) {
+ /* There's no owner yet where we could check for
+ * compatibility.
+ */
+ array->owner_prog_type = fp->type;
+ array->owner_jited = fp->jited;
+
+ return true;
+ }
+
+ return array->owner_prog_type == fp->type &&
+ array->owner_jited == fp->jited;
+}
+
+static int bpf_check_tail_call(const struct bpf_prog *fp)
+{
+ struct bpf_prog_aux *aux = fp->aux;
+ int i;
+
+ for (i = 0; i < aux->used_map_cnt; i++) {
+ struct bpf_map *map = aux->used_maps[i];
+ struct bpf_array *array;
+
+ if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+ continue;
+
+ array = container_of(map, struct bpf_array, map);
+ if (!bpf_prog_array_compatible(array, fp))
+ return -EINVAL;
+ }
+
+ return 0;
}
/**
- * bpf_prog_select_runtime - select execution runtime for BPF program
+ * bpf_prog_select_runtime - select exec runtime for BPF program
* @fp: bpf_prog populated with internal BPF program
*
- * try to JIT internal BPF program, if JIT is not available select interpreter
- * BPF program will be executed via BPF_PROG_RUN() macro
+ * Try to JIT eBPF program, if JIT is not available, use interpreter.
+ * The BPF program will be executed via BPF_PROG_RUN() macro.
*/
-void bpf_prog_select_runtime(struct bpf_prog *fp)
+int bpf_prog_select_runtime(struct bpf_prog *fp)
{
fp->bpf_func = (void *) __bpf_prog_run;
- /* Probe if internal BPF can be JITed */
bpf_int_jit_compile(fp);
- /* Lock whole bpf_prog as read-only */
bpf_prog_lock_ro(fp);
+
+ /* The tail call compatibility check can only be done at
+ * this late stage, as we need to determine whether we deal
+ * with JITed or non-JITed program concatenations, and not
+ * all eBPF JITs might immediately support all features.
+ */
+ return bpf_check_tail_call(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
@@ -663,6 +729,22 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
+const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
+
+/* Always built-in helper functions. */
+const struct bpf_func_proto bpf_tail_call_proto = {
+ .func = NULL,
+ .gpl_only = false,
+ .ret_type = RET_VOID,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+};
+
+/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
+void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+{
+}
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
* skb_copy_bits(), so provide a weak definition of it for NET-less config.
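
bpf_tail_call_proto has .func = NULL because a tail call never reaches a
helper function: fixup_bpf_calls() (in the syscall.c hunk below) rewrites the
call site into the dedicated opcode before the program runs. A sketch of how
a program would emit the helper, assuming the insn macros used by
samples/bpf (BPF_LD_MAP_FD, BPF_RAW_INSN) and a hypothetical prog_array_fd:

	/* Sketch: tail-call into slot 2 of a prog array map.
	 * R1 already holds the context at program entry. */
	struct bpf_insn insns[] = {
		BPF_LD_MAP_FD(BPF_REG_2, prog_array_fd),  /* arg2: map   */
		BPF_MOV64_IMM(BPF_REG_3, 2),              /* arg3: index */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* reached only if the
						 * tail call fell through */
		BPF_EXIT_INSN(),
	};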
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index bd7f5988ed9c..7ad5d8842d5b 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -13,6 +13,7 @@
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
+#include <linux/ktime.h>
/* If kernel subsystem is allowing eBPF programs to call this function,
* inside its own verifier_ops->get_func_proto() callback it should return
@@ -44,11 +45,11 @@ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
}
const struct bpf_func_proto bpf_map_lookup_elem_proto = {
- .func = bpf_map_lookup_elem,
- .gpl_only = false,
- .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
- .arg1_type = ARG_CONST_MAP_PTR,
- .arg2_type = ARG_PTR_TO_MAP_KEY,
+ .func = bpf_map_lookup_elem,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_MAP_KEY,
};
static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
@@ -63,13 +64,13 @@ static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
}
const struct bpf_func_proto bpf_map_update_elem_proto = {
- .func = bpf_map_update_elem,
- .gpl_only = false,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_CONST_MAP_PTR,
- .arg2_type = ARG_PTR_TO_MAP_KEY,
- .arg3_type = ARG_PTR_TO_MAP_VALUE,
- .arg4_type = ARG_ANYTHING,
+ .func = bpf_map_update_elem,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_MAP_KEY,
+ .arg3_type = ARG_PTR_TO_MAP_VALUE,
+ .arg4_type = ARG_ANYTHING,
};
static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
@@ -83,11 +84,11 @@ static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
}
const struct bpf_func_proto bpf_map_delete_elem_proto = {
- .func = bpf_map_delete_elem,
- .gpl_only = false,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_CONST_MAP_PTR,
- .arg2_type = ARG_PTR_TO_MAP_KEY,
+ .func = bpf_map_delete_elem,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_MAP_KEY,
};
static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
@@ -111,3 +112,15 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
};
+
+static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ /* NMI-safe access to clock monotonic */
+ return ktime_get_mono_fast_ns();
+}
+
+const struct bpf_func_proto bpf_ktime_get_ns_proto = {
+ .func = bpf_ktime_get_ns,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+};
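
Moving bpf_ktime_get_ns() into kernel/bpf/helpers.c (it was private to
bpf_trace.c; see the hunk removing it below) lets any program type expose it
through its get_func_proto() callback; gpl_only stays set. A sketch of
typical use from restricted C, assuming the helper declaration style of
samples/bpf/bpf_helpers.h:

	static u64 (*bpf_ktime_get_ns)(void) =
		(void *) BPF_FUNC_ktime_get_ns;

	static u64 delta_ns(u64 start)
	{
		return bpf_ktime_get_ns() - start; /* NMI-safe, monotonic */
	}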
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 3bae6c591914..a1b14d197a4f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -68,6 +68,12 @@ static int bpf_map_release(struct inode *inode, struct file *filp)
{
struct bpf_map *map = filp->private_data;
+ if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
+ /* prog_array stores refcnt-ed bpf_prog pointers;
+ * release them all when user space closes prog_array_fd
+ */
+ bpf_prog_array_map_clear(map);
+
bpf_map_put(map);
return 0;
}
@@ -392,6 +398,19 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
*/
BUG_ON(!prog->aux->ops->get_func_proto);
+ if (insn->imm == BPF_FUNC_tail_call) {
+ /* mark bpf_tail_call as a different opcode
+ * to avoid a conditional branch in the
+ * interpreter for every normal call, and
+ * to prevent accidental JITing by a JIT
+ * compiler that doesn't support
+ * bpf_tail_call yet
+ */
+ insn->imm = 0;
+ insn->code |= BPF_X;
+ continue;
+ }
+
fn = prog->aux->ops->get_func_proto(insn->imm);
/* all functions that have prototype and verifier allowed
* programs to call them, must be real in-kernel functions
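
The rewrite above is deliberately the only special case in fixup_bpf_calls():
every other helper call keeps BPF_JMP | BPF_CALL and gets a real function
pointer patched in. Extracted as a stand-alone sketch, the tail-call
transformation is just:

	/* Before: code = BPF_JMP | BPF_CALL,         imm = BPF_FUNC_tail_call
	 * After:  code = BPF_JMP | BPF_CALL | BPF_X, imm = 0
	 * JITs that don't know the new opcode punt to the interpreter. */
	static void rewrite_tail_call(struct bpf_insn *insn)
	{
		insn->imm = 0;
		insn->code |= BPF_X;
	}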
@@ -413,6 +432,23 @@ static void free_used_maps(struct bpf_prog_aux *aux)
kfree(aux->used_maps);
}
+static void __prog_put_rcu(struct rcu_head *rcu)
+{
+ struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
+
+ free_used_maps(aux);
+ bpf_prog_free(aux->prog);
+}
+
+/* version of bpf_prog_put() that defers freeing until after an RCU grace period */
+void bpf_prog_put_rcu(struct bpf_prog *prog)
+{
+ if (atomic_dec_and_test(&prog->aux->refcnt)) {
+ prog->aux->prog = prog;
+ call_rcu(&prog->aux->rcu, __prog_put_rcu);
+ }
+}
+
void bpf_prog_put(struct bpf_prog *prog)
{
if (atomic_dec_and_test(&prog->aux->refcnt)) {
@@ -426,7 +462,7 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
{
struct bpf_prog *prog = filp->private_data;
- bpf_prog_put(prog);
+ bpf_prog_put_rcu(prog);
return 0;
}
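
The grace period matters because a program reachable through a prog_array may
still be mid-flight on another CPU when its last fd is closed: the tail-call
path reads array->prog[index] and is expected to run under rcu_read_lock().
A sketch of that reader side (run() is a hypothetical placeholder for the
dispatch step):

	rcu_read_lock();
	prog = READ_ONCE(array->prog[index]);
	if (prog)
		run(prog);  /* may race with the final bpf_prog_put_rcu() */
	rcu_read_unlock();
	/* __prog_put_rcu() frees the prog only after all such readers
	 * have left their RCU read-side critical sections. */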
@@ -532,7 +568,9 @@ static int bpf_prog_load(union bpf_attr *attr)
fixup_bpf_calls(prog);
/* eBPF program is ready to be JITed */
- bpf_prog_select_runtime(prog);
+ err = bpf_prog_select_runtime(prog);
+ if (err < 0)
+ goto free_used_maps;
err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
if (err < 0)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 47dcd3aa6e23..039d866fd36a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -907,6 +907,23 @@ static int check_call(struct verifier_env *env, int func_id)
fn->ret_type, func_id);
return -EINVAL;
}
+
+ if (map && map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
+ func_id != BPF_FUNC_tail_call)
+ /* prog_array map type needs extra care:
+ * only allow passing it into bpf_tail_call() for now.
+ * bpf_map_delete_elem() can be allowed in the future,
+ * while bpf_map_update_elem() must only be done via syscall
+ */
+ return -EINVAL;
+
+ if (func_id == BPF_FUNC_tail_call &&
+ map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+ /* don't allow any other map type to be passed into
+ * bpf_tail_call()
+ */
+ return -EINVAL;
+
return 0;
}
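
Read together, the two checks make prog_array and bpf_tail_call() usable only
with each other. A sketch of the combined predicate (a restatement of the
logic above, not a separate kernel function):

	/* Assumes the helper takes a map argument at all. */
	static bool pairing_ok(const struct bpf_map *map, int func_id)
	{
		bool prog_array = map->map_type == BPF_MAP_TYPE_PROG_ARRAY;
		bool tail_call  = func_id == BPF_FUNC_tail_call;

		/* prog_array only via bpf_tail_call(), and vice versa */
		return prog_array == tail_call;
	}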
@@ -1675,6 +1692,8 @@ static int do_check(struct verifier_env *env)
}
} else if (class == BPF_STX) {
+ enum bpf_reg_type dst_reg_type;
+
if (BPF_MODE(insn->code) == BPF_XADD) {
err = check_xadd(env, insn);
if (err)
@@ -1683,11 +1702,6 @@ static int do_check(struct verifier_env *env)
continue;
}
- if (BPF_MODE(insn->code) != BPF_MEM ||
- insn->imm != 0) {
- verbose("BPF_STX uses reserved fields\n");
- return -EINVAL;
- }
/* check src1 operand */
err = check_reg_arg(regs, insn->src_reg, SRC_OP);
if (err)
@@ -1697,6 +1711,8 @@ static int do_check(struct verifier_env *env)
if (err)
return err;
+ dst_reg_type = regs[insn->dst_reg].type;
+
/* check that memory (dst_reg + off) is writeable */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
@@ -1704,6 +1720,15 @@ static int do_check(struct verifier_env *env)
if (err)
return err;
+ if (insn->imm == 0) {
+ insn->imm = dst_reg_type;
+ } else if (dst_reg_type != insn->imm &&
+ (dst_reg_type == PTR_TO_CTX ||
+ insn->imm == PTR_TO_CTX)) {
+ verbose("same insn cannot be used with different pointers\n");
+ return -EINVAL;
+ }
+
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM ||
insn->src_reg != BPF_REG_0) {
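
The verifier reuses the imm field of BPF_STX | BPF_MEM stores as a one-slot
cache of the pointer type seen at that instruction; the reserved-field check
is moved earlier (see replace_map_fd_with_map_ptr() below) so imm is free
for this purpose once verification starts. The rule, restated as a sketch:

	/* First visit records the type; later visits may not flip
	 * between PTR_TO_CTX and any other pointer type, because ctx
	 * accesses get rewritten per-type in convert_ctx_accesses(). */
	if (insn->imm == 0)
		insn->imm = dst_reg_type;
	else if (dst_reg_type != insn->imm &&
		 (dst_reg_type == PTR_TO_CTX || insn->imm == PTR_TO_CTX))
		return -EINVAL;	/* same insn, different pointers */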
@@ -1822,12 +1847,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
for (i = 0; i < insn_cnt; i++, insn++) {
if (BPF_CLASS(insn->code) == BPF_LDX &&
- (BPF_MODE(insn->code) != BPF_MEM ||
- insn->imm != 0)) {
+ (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
verbose("BPF_LDX uses reserved fields\n");
return -EINVAL;
}
+ if (BPF_CLASS(insn->code) == BPF_STX &&
+ ((BPF_MODE(insn->code) != BPF_MEM &&
+ BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
+ verbose("BPF_STX uses reserved fields\n");
+ return -EINVAL;
+ }
+
if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
struct bpf_map *map;
struct fd f;
@@ -1950,12 +1981,17 @@ static int convert_ctx_accesses(struct verifier_env *env)
struct bpf_prog *new_prog;
u32 cnt;
int i;
+ enum bpf_access_type type;
if (!env->prog->aux->ops->convert_ctx_access)
return 0;
for (i = 0; i < insn_cnt; i++, insn++) {
- if (insn->code != (BPF_LDX | BPF_MEM | BPF_W))
+ if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
+ type = BPF_READ;
+ else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
+ type = BPF_WRITE;
+ else
continue;
if (insn->imm != PTR_TO_CTX) {
@@ -1965,7 +2001,7 @@ static int convert_ctx_accesses(struct verifier_env *env)
}
cnt = env->prog->aux->ops->
- convert_ctx_access(insn->dst_reg, insn->src_reg,
+ convert_ctx_access(type, insn->dst_reg, insn->src_reg,
insn->off, insn_buf);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose("bpf verifier is misconfigured\n");
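
Passing the access type through means a single convert_ctx_access()
implementation can now emit different instructions for context reads and
writes. The adjusted callback shape, as used by the call site above (the
per-program-type implementations live in the respective subsystems):

	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  int dst_reg, int src_reg, int ctx_off,
				  struct bpf_insn *insn_buf);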
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 4f44028943e6..245df6b32b81 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -346,16 +346,13 @@ static inline void seccomp_sync_threads(void)
*/
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
- struct seccomp_filter *filter;
- unsigned long fp_size;
- struct sock_filter *fp;
- int new_len;
- long ret;
+ struct seccomp_filter *sfilter;
+ int ret;
if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
return ERR_PTR(-EINVAL);
+
BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
- fp_size = fprog->len * sizeof(struct sock_filter);
/*
* Installing a seccomp filter requires that the task has
@@ -368,60 +365,21 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
CAP_SYS_ADMIN) != 0)
return ERR_PTR(-EACCES);
- fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN);
- if (!fp)
- return ERR_PTR(-ENOMEM);
-
- /* Copy the instructions from fprog. */
- ret = -EFAULT;
- if (copy_from_user(fp, fprog->filter, fp_size))
- goto free_prog;
-
- /* Check and rewrite the fprog via the skb checker */
- ret = bpf_check_classic(fp, fprog->len);
- if (ret)
- goto free_prog;
-
- /* Check and rewrite the fprog for seccomp use */
- ret = seccomp_check_filter(fp, fprog->len);
- if (ret)
- goto free_prog;
-
- /* Convert 'sock_filter' insns to 'bpf_insn' insns */
- ret = bpf_convert_filter(fp, fprog->len, NULL, &new_len);
- if (ret)
- goto free_prog;
-
/* Allocate a new seccomp_filter */
- ret = -ENOMEM;
- filter = kzalloc(sizeof(struct seccomp_filter),
- GFP_KERNEL|__GFP_NOWARN);
- if (!filter)
- goto free_prog;
-
- filter->prog = bpf_prog_alloc(bpf_prog_size(new_len), __GFP_NOWARN);
- if (!filter->prog)
- goto free_filter;
-
- ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
- if (ret)
- goto free_filter_prog;
-
- kfree(fp);
- atomic_set(&filter->usage, 1);
- filter->prog->len = new_len;
+ sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
+ if (!sfilter)
+ return ERR_PTR(-ENOMEM);
- bpf_prog_select_runtime(filter->prog);
+ ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
+ seccomp_check_filter);
+ if (ret < 0) {
+ kfree(sfilter);
+ return ERR_PTR(ret);
+ }
- return filter;
+ atomic_set(&sfilter->usage, 1);
-free_filter_prog:
- __bpf_prog_free(filter->prog);
-free_filter:
- kfree(filter);
-free_prog:
- kfree(fp);
- return ERR_PTR(ret);
+ return sfilter;
}
/**
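
The seccomp rewrite above is a pure consolidation: the open-coded
copy_from_user() / bpf_check_classic() / seccomp_check_filter() /
bpf_convert_filter() sequence moves behind bpf_prog_create_from_user(), with
the seccomp-specific rewrite passed in as a callback. The resulting flow,
sketched:

	/* bpf_prog_create_from_user() now owns the classic-BPF pipeline:
	 *   1. copy the sock_filter array from user space
	 *   2. bpf_check_classic()
	 *   3. the trans hook -- here seccomp_check_filter()
	 *   4. convert to eBPF and select the runtime
	 * On any failure it frees what it allocated and returns < 0. */
	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter);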
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 2d56ce501632..50c4015a8ad3 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -79,18 +79,6 @@ static const struct bpf_func_proto bpf_probe_read_proto = {
.arg3_type = ARG_ANYTHING,
};
-static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
- /* NMI safe access to clock monotonic */
- return ktime_get_mono_fast_ns();
-}
-
-static const struct bpf_func_proto bpf_ktime_get_ns_proto = {
- .func = bpf_ktime_get_ns,
- .gpl_only = true,
- .ret_type = RET_INTEGER,
-};
-
/*
* limited trace_printk()
* only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed
@@ -172,6 +160,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
return &bpf_probe_read_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
+ case BPF_FUNC_tail_call:
+ return &bpf_tail_call_proto;
case BPF_FUNC_trace_printk:
/*
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 8609378e6505..a60a6d335a91 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -585,7 +585,6 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
struct bucket_table *tbl = iter->walker->tbl;
struct rhashtable *ht = iter->ht;
struct rhash_head *p = iter->p;
- void *obj = NULL;
if (p) {
p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
@@ -605,8 +604,7 @@ next:
if (!rht_is_a_nulls(p)) {
iter->skip++;
iter->p = p;
- obj = rht_obj(ht, p);
- goto out;
+ return rht_obj(ht, p);
}
iter->skip = 0;
@@ -624,9 +622,7 @@ next:
iter->p = NULL;
-out:
-
- return obj;
+ return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 80d78c51f65f..7f58c735d745 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -21,6 +21,7 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/random.h>
/* General test specific settings */
#define MAX_SUBTESTS 3
@@ -67,6 +68,10 @@ struct bpf_test {
union {
struct sock_filter insns[MAX_INSNS];
struct bpf_insn insns_int[MAX_INSNS];
+ struct {
+ void *insns;
+ unsigned int len;
+ } ptr;
} u;
__u8 aux;
__u8 data[MAX_DATA];
@@ -74,8 +79,282 @@ struct bpf_test {
int data_size;
__u32 result;
} test[MAX_SUBTESTS];
+ int (*fill_helper)(struct bpf_test *self);
};
+/* Large test cases need separate allocation and fill handler. */
+
+static int bpf_fill_maxinsns1(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ __u32 k = ~0;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++, k--)
+ insn[i] = __BPF_STMT(BPF_RET | BPF_K, k);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns2(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++)
+ insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns3(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ struct rnd_state rnd;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ prandom_seed_state(&rnd, 3141592653589793238ULL);
+
+ for (i = 0; i < len - 1; i++) {
+ __u32 k = prandom_u32_state(&rnd);
+
+ insn[i] = __BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, k);
+ }
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns4(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS + 1;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++)
+ insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns5(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
+
+ for (i = 1; i < len - 1; i++)
+ insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns6(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len - 1; i++)
+ insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+ SKF_AD_VLAN_TAG_PRESENT);
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns7(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len - 4; i++)
+ insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+ SKF_AD_CPU);
+
+ insn[len - 4] = __BPF_STMT(BPF_MISC | BPF_TAX, 0);
+ insn[len - 3] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+ SKF_AD_CPU);
+ insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0);
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns8(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i, jmp_off = len - 3;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = __BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff);
+
+ for (i = 1; i < len - 1; i++)
+ insn[i] = __BPF_JUMP(BPF_JMP | BPF_JGT, 0xffffffff, jmp_off--, 0);
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns9(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = BPF_JMP_IMM(BPF_JA, 0, 0, len - 2);
+ insn[1] = BPF_ALU32_IMM(BPF_MOV, R0, 0xcbababab);
+ insn[2] = BPF_EXIT_INSN();
+
+ for (i = 3; i < len - 2; i++)
+ insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xfefefefe);
+
+ insn[len - 2] = BPF_EXIT_INSN();
+ insn[len - 1] = BPF_JMP_IMM(BPF_JA, 0, 0, -(len - 1));
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns10(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS, hlen = len - 2;
+ struct bpf_insn *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < hlen / 2; i++)
+ insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 2 - 2 * i);
+ for (i = hlen - 1; i > hlen / 2; i--)
+ insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 1 - 2 * i);
+
+ insn[hlen / 2] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen / 2 - 1);
+ insn[hlen] = BPF_ALU32_IMM(BPF_MOV, R0, 0xabababac);
+ insn[hlen + 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
+ unsigned int plen)
+{
+ struct sock_filter *insn;
+ unsigned int rlen;
+ int i, j;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ rlen = (len % plen) - 1;
+
+ for (i = 0; i + plen < len; i += plen)
+ for (j = 0; j < plen; j++)
+ insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA,
+ plen - 1 - j, 0, 0);
+ for (j = 0; j < rlen; j++)
+ insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j,
+ 0, 0);
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns11(struct bpf_test *self)
+{
+ /* Hits 70 passes on x86_64, so cannot get JITed there. */
+ return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
+}
+
+static int bpf_fill_ja(struct bpf_test *self)
+{
+ /* Hits exactly 11 passes on x86_64 JIT. */
+ return __bpf_fill_ja(self, 12, 9);
+}
+
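
Each fill helper only builds the instruction array into u.ptr; the generic
test runner presumably allocates nothing itself and frees u.ptr.insns after
the run. Each helper would then be wired into tests[] along these lines (the
exact entries appear further down in this patch; flags and expected result
here are illustrative):

	{	/* Mainly checking JIT here. */
		"BPF_MAXINSNS: Maximum possible literals",
		{ },
		CLASSIC | FLAG_NO_DATA,
		{ },
		{ { 0, 0xffffffff } },
		.fill_helper = bpf_fill_maxinsns1,
	},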
static struct bpf_test tests[] = {
{
"TAX",
@@ -1755,7 +2034,8 @@ static struct bpf_test tests[] = {
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
BPF_EXIT_INSN(),
- BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R0, 0x1ffffffffLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -1805,6 +2085,2313 @@ static struct bpf_test tests[] = {
0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
{ { 38, 256 } }
},
+ /* BPF_ALU | BPF_MOV | BPF_X */
+ {
+ "ALU_MOV_X: dst = 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_MOV_X: dst = 4294967295",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+ BPF_ALU32_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ {
+ "ALU64_MOV_X: dst = 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_MOV_X: dst = 4294967295",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ /* BPF_ALU | BPF_MOV | BPF_K */
+ {
+ "ALU_MOV_K: dst = 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_MOV_K: dst = 4294967295",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ {
+ "ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x00000000ffffffffLL),
+ BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MOV_K: dst = 2",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_MOV_K: dst = 2147483647",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2147483647 } },
+ },
+ {
+ "ALU64_OR_K: dst = 0x0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x0),
+ BPF_ALU64_IMM(BPF_MOV, R2, 0x0),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MOV_K: dst = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* BPF_ALU | BPF_ADD | BPF_X */
+ {
+ "ALU_ADD_X: 1 + 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_ADD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_ADD_X: 1 + 4294967294 = 4294967295",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+ BPF_ALU32_REG(BPF_ADD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ {
+ "ALU64_ADD_X: 1 + 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_ADD_X: 1 + 4294967294 = 4294967295",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ /* BPF_ALU | BPF_ADD | BPF_K */
+ {
+ "ALU_ADD_K: 1 + 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_ADD_K: 3 + 0 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_ADD, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_ADD_K: 1 + 4294967294 = 4294967295",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ {
+ "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0x00000000ffffffff),
+ BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_ADD_K: 1 + 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_ADD_K: 3 + 0 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_ADD, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_ADD_K: 1 + 2147483646 = 2147483647",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 2147483646),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2147483647 } },
+ },
+ {
+ "ALU64_ADD_K: 2147483646 + -2147483647 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483646),
+ BPF_ALU64_IMM(BPF_ADD, R0, -2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ {
+ "ALU64_ADD_K: 1 + 0 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x1),
+ BPF_LD_IMM64(R3, 0x1),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0x0),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* BPF_ALU | BPF_SUB | BPF_X */
+ {
+ "ALU_SUB_X: 3 - 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU32_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_SUB_X: 4294967295 - 4294967294 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+ BPF_ALU32_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_SUB_X: 3 - 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_SUB_X: 4294967295 - 4294967294 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_ALU | BPF_SUB | BPF_K */
+ {
+ "ALU_SUB_K: 3 - 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_SUB, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_SUB_K: 3 - 0 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_SUB, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_SUB_K: 4294967295 - 4294967294 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_SUB_K: 3 - 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_SUB, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_SUB_K: 3 - 0 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_SUB, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_SUB_K: 4294967294 - 4294967295 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967294U),
+ BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ {
+ "ALU64_ADD_K: 2147483646 - 2147483647 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483646),
+ BPF_ALU64_IMM(BPF_SUB, R0, 2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ /* BPF_ALU | BPF_MUL | BPF_X */
+ {
+ "ALU_MUL_X: 2 * 3 = 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 3),
+ BPF_ALU32_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 6 } },
+ },
+ {
+ "ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8),
+ BPF_ALU32_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xFFFFFFF0 } },
+ },
+ {
+ "ALU_MUL_X: -1 * -1 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -1),
+ BPF_ALU32_IMM(BPF_MOV, R1, -1),
+ BPF_ALU32_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_MUL_X: 2 * 3 = 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 3),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 6 } },
+ },
+ {
+ "ALU64_MUL_X: 1 * 2147483647 = 2147483647",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2147483647 } },
+ },
+ /* BPF_ALU | BPF_MUL | BPF_K */
+ {
+ "ALU_MUL_K: 2 * 3 = 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MUL, R0, 3),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 6 } },
+ },
+ {
+ "ALU_MUL_K: 3 * 1 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MUL, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xFFFFFFF0 } },
+ },
+ {
+ "ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x1),
+ BPF_LD_IMM64(R3, 0x00000000ffffffff),
+ BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MUL_K: 2 * 3 = 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU64_IMM(BPF_MUL, R0, 3),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 6 } },
+ },
+ {
+ "ALU64_MUL_K: 3 * 1 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_MUL, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_MUL_K: 1 * 2147483647 = 2147483647",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_MUL, R0, 2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2147483647 } },
+ },
+ {
+ "ALU64_MUL_K: 1 * -2147483647 = -2147483647",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_MUL, R0, -2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -2147483647 } },
+ },
+ {
+ "ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x1),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* BPF_ALU | BPF_DIV | BPF_X */
+ {
+ "ALU_DIV_X: 6 / 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 6),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_DIV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_DIV_X: 4294967295 / 4294967295 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+ BPF_ALU32_REG(BPF_DIV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_DIV_X: 6 / 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 6),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_DIV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_DIV_X: 2147483647 / 2147483647 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483647),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
+ BPF_ALU64_REG(BPF_DIV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+ BPF_LD_IMM64(R4, 0xffffffffffffffffLL),
+ BPF_LD_IMM64(R3, 0x0000000000000001LL),
+ BPF_ALU64_REG(BPF_DIV, R2, R4),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* BPF_ALU | BPF_DIV | BPF_K */
+ {
+ "ALU_DIV_K: 6 / 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 6),
+ BPF_ALU32_IMM(BPF_DIV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_DIV_K: 3 / 1 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_DIV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_DIV_K: 4294967295 / 4294967295 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+ BPF_LD_IMM64(R3, 0x1UL),
+ BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_DIV_K: 6 / 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 6),
+ BPF_ALU64_IMM(BPF_DIV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_DIV_K: 3 / 1 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_DIV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_DIV_K: 2147483647 / 2147483647 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483647),
+ BPF_ALU64_IMM(BPF_DIV, R0, 2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_DIV_K: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+ BPF_LD_IMM64(R3, 0x0000000000000001LL),
+ BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* BPF_ALU | BPF_MOD | BPF_X */
+ {
+ "ALU_MOD_X: 3 % 2 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_MOD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_MOD_X: 4294967295 % 4294967293 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U),
+ BPF_ALU32_REG(BPF_MOD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_MOD_X: 3 % 2 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_MOD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_MOD_X: 2147483647 % 2147483645 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483647),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2147483645),
+ BPF_ALU64_REG(BPF_MOD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ /* BPF_ALU | BPF_MOD | BPF_K */
+ {
+ "ALU_MOD_K: 3 % 2 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOD, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_MOD_K: 3 % 1 = 0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOD, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "ALU_MOD_K: 4294967295 % 4294967293 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_MOD_K: 3 % 2 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_MOD, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_MOD_K: 3 % 1 = 0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_MOD, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "ALU64_MOD_K: 2147483647 % 2147483645 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483647),
+ BPF_ALU64_IMM(BPF_MOD, R0, 2147483645),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ /* BPF_ALU | BPF_AND | BPF_X */
+ {
+ "ALU_AND_X: 3 & 2 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_AND, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffff),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU32_REG(BPF_AND, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU64_AND_X: 3 & 2 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_AND, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffff),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU64_REG(BPF_AND, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ /* BPF_ALU | BPF_AND | BPF_K */
+ {
+ "ALU_AND_K: 3 & 2 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_AND, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffff),
+ BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU64_AND_K: 3 & 2 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_AND, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffff),
+ BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000ffff00000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x0000000000000000LL),
+ BPF_ALU64_IMM(BPF_AND, R2, 0x0),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+ BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* BPF_ALU | BPF_OR | BPF_X */
+ {
+ "ALU_OR_X: 1 | 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_OR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU32_REG(BPF_OR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU64_OR_X: 1 | 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_OR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_OR_X: 0 | 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU64_REG(BPF_OR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ /* BPF_ALU | BPF_OR | BPF_K */
+ {
+ "ALU_OR_K: 1 | 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_OR, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_OR_K: 0 & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU64_OR_K: 1 | 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_OR, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_OR_K: 0 & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffff00000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+ BPF_ALU64_IMM(BPF_OR, R2, 0x0),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000000000000000LL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* BPF_ALU | BPF_XOR | BPF_X */
+ {
+ "ALU_XOR_X: 5 ^ 6 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 5),
+ BPF_ALU32_IMM(BPF_MOV, R1, 6),
+ BPF_ALU32_REG(BPF_XOR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU32_REG(BPF_XOR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } },
+ },
+ {
+ "ALU64_XOR_X: 5 ^ 6 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 5),
+ BPF_ALU32_IMM(BPF_MOV, R1, 6),
+ BPF_ALU64_REG(BPF_XOR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU64_REG(BPF_XOR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } },
+ },
+ /* BPF_ALU | BPF_XOR | BPF_K */
+ {
+ "ALU_XOR_K: 5 ^ 6 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 5),
+ BPF_ALU32_IMM(BPF_XOR, R0, 6),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } },
+ },
+ {
+ "ALU64_XOR_K: 5 ^ 6 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 5),
+ BPF_ALU64_IMM(BPF_XOR, R0, 6),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_XOR_K: 1 & 0xffffffff = 0xfffffffe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } },
+ },
+ {
+ "ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+ BPF_ALU64_IMM(BPF_XOR, R2, 0x0),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0xffff00000000ffffLL),
+ BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000000000000000LL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* BPF_ALU | BPF_LSH | BPF_X */
+ {
+ "ALU_LSH_X: 1 << 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU32_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_LSH_X: 1 << 31 = 0x80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 31),
+ BPF_ALU32_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x80000000 } },
+ },
+ {
+ "ALU64_LSH_X: 1 << 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_LSH_X: 1 << 31 = 0x80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 31),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x80000000 } },
+ },
+ /* BPF_ALU | BPF_LSH | BPF_K */
+ {
+ "ALU_LSH_K: 1 << 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_LSH, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_LSH_K: 1 << 31 = 0x80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_LSH, R0, 31),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x80000000 } },
+ },
+ {
+ "ALU64_LSH_K: 1 << 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_LSH, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_LSH_K: 1 << 31 = 0x80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_LSH, R0, 31),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x80000000 } },
+ },
+ /* BPF_ALU | BPF_RSH | BPF_X */
+ {
+ "ALU_RSH_X: 2 >> 1 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU32_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_RSH_X: 0x80000000 >> 31 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x80000000),
+ BPF_ALU32_IMM(BPF_MOV, R1, 31),
+ BPF_ALU32_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_RSH_X: 2 >> 1 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_RSH_X: 0x80000000 >> 31 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x80000000),
+ BPF_ALU32_IMM(BPF_MOV, R1, 31),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_ALU | BPF_RSH | BPF_K */
+ {
+ "ALU_RSH_K: 2 >> 1 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_RSH, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_RSH_K: 0x80000000 >> 31 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x80000000),
+ BPF_ALU32_IMM(BPF_RSH, R0, 31),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_RSH_K: 2 >> 1 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU64_IMM(BPF_RSH, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_RSH_K: 0x80000000 >> 31 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x80000000),
+ BPF_ALU64_IMM(BPF_RSH, R0, 31),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_ALU | BPF_ARSH | BPF_X */
+ {
+ "ALU_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 40),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffff00ff } },
+ },
+ /* BPF_ALU | BPF_ARSH | BPF_K */
+ {
+ "ALU_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffff00ff } },
+ },
+ /* BPF_ALU | BPF_NEG */
+ {
+ "ALU_NEG: -(3) = -3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 3),
+ BPF_ALU32_IMM(BPF_NEG, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ {
+ "ALU_NEG: -(-3) = 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -3),
+ BPF_ALU32_IMM(BPF_NEG, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_NEG: -(3) = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_NEG, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ {
+ "ALU64_NEG: -(-3) = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -3),
+ BPF_ALU64_IMM(BPF_NEG, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_BE */
+ {
+ "ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be16(0xcdef) } },
+ },
+ {
+ "ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be32(0x89abcdef) } },
+ },
+ {
+ "ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_LE */
+ {
+ "ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le16(0xcdef) } },
+ },
+ {
+ "ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le32(0x89abcdef) } },
+ },
+ {
+ "ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } },
+ },
+ /* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */
+ {
+ "ST_MEM_B: Store/Load byte: max negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_B, R10, -40, 0xff),
+ BPF_LDX_MEM(BPF_B, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xff } },
+ },
+ {
+ "ST_MEM_B: Store/Load byte: max positive",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_B, R10, -40, 0x7f),
+ BPF_LDX_MEM(BPF_B, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x7f } },
+ },
+ {
+ "STX_MEM_B: Store/Load byte: max negative",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0xffLL),
+ BPF_STX_MEM(BPF_B, R10, R1, -40),
+ BPF_LDX_MEM(BPF_B, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xff } },
+ },
+ {
+ "ST_MEM_H: Store/Load half word: max negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_H, R10, -40, 0xffff),
+ BPF_LDX_MEM(BPF_H, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffff } },
+ },
+ {
+ "ST_MEM_H: Store/Load half word: max positive",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_H, R10, -40, 0x7fff),
+ BPF_LDX_MEM(BPF_H, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x7fff } },
+ },
+ {
+ "STX_MEM_H: Store/Load half word: max negative",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0xffffLL),
+ BPF_STX_MEM(BPF_H, R10, R1, -40),
+ BPF_LDX_MEM(BPF_H, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffff } },
+ },
+ {
+ "ST_MEM_W: Store/Load word: max negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ST_MEM_W: Store/Load word: max positive",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x7fffffff } },
+ },
+ {
+ "STX_MEM_W: Store/Load word: max negative",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffLL),
+ BPF_STX_MEM(BPF_W, R10, R1, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ST_MEM_DW: Store/Load double word: max negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ST_MEM_DW: Store/Load double word: max negative 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffff00000000ffffLL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
+ BPF_LDX_MEM(BPF_DW, R2, R10, -40),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ST_MEM_DW: Store/Load double word: max positive",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x7fffffff } },
+ },
+ {
+ "STX_MEM_DW: Store/Load double word: max negative",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ /* BPF_STX | BPF_XADD | BPF_W/DW */
+ {
+ "STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_W, R10, R0, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x22 } },
+ },
+ {
+ "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
+ BPF_STX_XADD(BPF_DW, R10, R0, -40),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x22 } },
+ },
+ /* BPF_JMP | BPF_EXIT */
+ {
+ "JMP_EXIT",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x4711),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x4712),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x4711 } },
+ },
+ /* BPF_JMP | BPF_JA */
+ {
+ "JMP_JA: Unconditional jump: if (true) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSGT | BPF_K */
+ {
+ "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSGT, R1, -2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGT_K: Signed jump: if (-1 > -1) return 0",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSGT, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSGE | BPF_K */
+ {
+ "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSGE, R1, -2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSGE, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JGT | BPF_K */
+ {
+ "JMP_JGT_K: if (3 > 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JGT, R1, 2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JGE | BPF_K */
+ {
+ "JMP_JGE_K: if (3 >= 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JGE, R1, 2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JGT | BPF_K jump backwards */
+ {
+ "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
+ .u.insns_int = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
+ BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */
+ BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGE_K: if (3 >= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JGE, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JNE | BPF_K */
+ {
+ "JMP_JNE_K: if (3 != 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JEQ | BPF_K */
+ {
+ "JMP_JEQ_K: if (3 == 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JEQ, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSET | BPF_K */
+ {
+ "JMP_JSET_K: if (0x3 & 0x2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSET_K: if (0x3 & 0xffffffff) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSGT | BPF_X */
+ {
+ "JMP_JSGT_X: Signed jump: if (-1 > -2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -2),
+ BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGT_X: Signed jump: if (-1 > -1) return 0",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -1),
+ BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSGE | BPF_X */
+ {
+ "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -2),
+ BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -1),
+ BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JGT | BPF_X */
+ {
+ "JMP_JGT_X: if (3 > 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JGE | BPF_X */
+ {
+ "JMP_JGE_X: if (3 >= 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGE_X: if (3 >= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 3),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JNE | BPF_X */
+ {
+ "JMP_JNE_X: if (3 != 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JEQ | BPF_X */
+ {
+ "JMP_JEQ_X: if (3 == 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 3),
+ BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSET | BPF_X */
+ {
+ "JMP_JSET_X: if (0x3 & 0x2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JSET, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSET_X: if (0x3 & 0xffffffff) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JSET, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JA: Jump, gap, jump, ...",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xababcbac } },
+ .fill_helper = bpf_fill_ja,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Maximum possible literals",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xffffffff } },
+ .fill_helper = bpf_fill_maxinsns1,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Single literal",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xfefefefe } },
+ .fill_helper = bpf_fill_maxinsns2,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Run/add until end",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0x947bf368 } },
+ .fill_helper = bpf_fill_maxinsns3,
+ },
+ {
+ "BPF_MAXINSNS: Too many instructions",
+ { },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ .fill_helper = bpf_fill_maxinsns4,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Very long jump",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xabababab } },
+ .fill_helper = bpf_fill_maxinsns5,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Ctx heavy transformations",
+ { },
+ CLASSIC,
+ { },
+ {
+ { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
+ { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+ },
+ .fill_helper = bpf_fill_maxinsns6,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Call heavy transformations",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 1, 0 }, { 10, 0 } },
+ .fill_helper = bpf_fill_maxinsns7,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Jump heavy test",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xffffffff } },
+ .fill_helper = bpf_fill_maxinsns8,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Very long jump backwards",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0xcbababab } },
+ .fill_helper = bpf_fill_maxinsns9,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Edge hopping nuthouse",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0xabababac } },
+ .fill_helper = bpf_fill_maxinsns10,
+ },
+ {
+ "BPF_MAXINSNS: Jump, gap, jump, ...",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xababcbac } },
+ .fill_helper = bpf_fill_maxinsns11,
+ },
};
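For orientation, every entry in tests[] above follows one shape; a minimal
INTERNAL (eBPF) case, sketched using only fields already exercised in this
array, would read:

	{
		"EXAMPLE: return a constant",	/* name printed per test */
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, 42),	/* R0 is the return value */
			BPF_EXIT_INSN(),
		},
		INTERNAL,		/* aux flags: run as eBPF */
		{ },			/* no input packet data */
		{ { 0, 42 } },		/* (data length, expected R0) */
	},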
static struct net_device dev;
@@ -1858,10 +4445,15 @@ static void release_test_data(const struct bpf_test *test, void *data)
kfree_skb(data);
}
-static int probe_filter_length(struct sock_filter *fp)
+static int filter_length(int which)
{
- int len = 0;
+ struct sock_filter *fp;
+ int len;
+ if (tests[which].fill_helper)
+ return tests[which].u.ptr.len;
+
+ fp = tests[which].u.insns;
for (len = MAX_INSNS - 1; len > 0; --len)
if (fp[len].code != 0 || fp[len].k != 0)
break;
@@ -1869,16 +4461,25 @@ static int probe_filter_length(struct sock_filter *fp)
return len + 1;
}
+static void *filter_pointer(int which)
+{
+ if (tests[which].fill_helper)
+ return tests[which].u.ptr.insns;
+ else
+ return tests[which].u.insns;
+}
+
static struct bpf_prog *generate_filter(int which, int *err)
{
- struct bpf_prog *fp;
- struct sock_fprog_kern fprog;
- unsigned int flen = probe_filter_length(tests[which].u.insns);
__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+ unsigned int flen = filter_length(which);
+ void *fptr = filter_pointer(which);
+ struct sock_fprog_kern fprog;
+ struct bpf_prog *fp;
switch (test_type) {
case CLASSIC:
- fprog.filter = tests[which].u.insns;
+ fprog.filter = fptr;
fprog.len = flen;
*err = bpf_prog_create(&fp, &fprog);
@@ -1914,8 +4515,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
}
fp->len = flen;
- memcpy(fp->insnsi, tests[which].u.insns_int,
- fp->len * sizeof(struct bpf_insn));
+ memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
bpf_prog_select_runtime(fp);
break;
@@ -1987,9 +4587,33 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
return err_cnt;
}
+static __init int prepare_bpf_tests(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (tests[i].fill_helper &&
+ tests[i].fill_helper(&tests[i]) < 0)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static __init void destroy_bpf_tests(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (tests[i].fill_helper)
+ kfree(tests[i].u.ptr.insns);
+ }
+}
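prepare_bpf_tests() and destroy_bpf_tests() pin down the .fill_helper
contract: generate the program at module load, stash it in u.ptr.insns with
its length in u.ptr.len, and return a negative errno on failure; the buffer is
kfree()d at module exit. A hypothetical helper (name and program invented for
illustration) following that contract:

	static int bpf_fill_example(struct bpf_test *self)
	{
		unsigned int len = 4096, i;
		struct sock_filter *insn;

		insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
		if (!insn)
			return -ENOMEM;

		/* classic BPF: increment A, then return it */
		for (i = 0; i < len - 1; i++)
			insn[i] = (struct sock_filter)
				BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1);
		insn[len - 1] = (struct sock_filter) BPF_STMT(BPF_RET | BPF_A, 0);

		self->u.ptr.insns = insn;	/* kfree()d by destroy_bpf_tests() */
		self->u.ptr.len = len;
		return 0;
	}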
+
static __init int test_bpf(void)
{
int i, err_cnt = 0, pass_cnt = 0;
+ int jit_cnt = 0, run_cnt = 0;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct bpf_prog *fp;
@@ -2006,6 +4630,13 @@ static __init int test_bpf(void)
return err;
}
+
+ pr_cont("jited:%u ", fp->jited);
+
+ run_cnt++;
+ if (fp->jited)
+ jit_cnt++;
+
err = run_one(fp, &tests[i]);
release_filter(fp, i);
@@ -2018,13 +4649,24 @@ static __init int test_bpf(void)
}
}
- pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
+ pr_info("Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
+ pass_cnt, err_cnt, jit_cnt, run_cnt);
+
return err_cnt ? -EINVAL : 0;
}
static int __init test_bpf_init(void)
{
- return test_bpf();
+ int ret;
+
+ ret = prepare_bpf_tests();
+ if (ret < 0)
+ return ret;
+
+ ret = test_bpf();
+
+ destroy_bpf_tests();
+ return ret;
}
static void __exit test_bpf_exit(void)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index b2957540d3c7..c90777eae1f8 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -1,14 +1,9 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
- * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
- * Based on the following paper:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
- * Code partially derived from nft_hash
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -26,20 +21,37 @@
#include <linux/rhashtable.h>
#include <linux/slab.h>
+#define MAX_ENTRIES 1000000
+#define TEST_INSERT_FAIL INT_MAX
+
+static int entries = 50000;
+module_param(entries, int, 0);
+MODULE_PARM_DESC(entries, "Number of entries to add (default: 50000)");
+
+static int runs = 4;
+module_param(runs, int, 0);
+MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
+
+static int max_size = 65536;
+module_param(max_size, int, 0);
+MODULE_PARM_DESC(max_size, "Maximum table size (default: 65536)");
-#define TEST_HT_SIZE 8
-#define TEST_ENTRIES 2048
-#define TEST_PTR ((void *) 0xdeadbeef)
-#define TEST_NEXPANDS 4
+static bool shrinking = false;
+module_param(shrinking, bool, 0);
+MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)");
+
+static int size = 8;
+module_param(size, int, 0);
+MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
struct test_obj {
- void *ptr;
int value;
struct rhash_head node;
};
-static const struct rhashtable_params test_rht_params = {
- .nelem_hint = TEST_HT_SIZE,
+static struct test_obj array[MAX_ENTRIES];
+
+static struct rhashtable_params test_rht_params = {
.head_offset = offsetof(struct test_obj, node),
.key_offset = offsetof(struct test_obj, value),
.key_len = sizeof(int),
@@ -51,11 +63,14 @@ static int __init test_rht_lookup(struct rhashtable *ht)
{
unsigned int i;
- for (i = 0; i < TEST_ENTRIES * 2; i++) {
+ for (i = 0; i < entries * 2; i++) {
struct test_obj *obj;
bool expected = !(i % 2);
u32 key = i;
+ if (array[i / 2].value == TEST_INSERT_FAIL)
+ expected = false;
+
obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
if (expected && !obj) {
@@ -66,9 +81,9 @@ static int __init test_rht_lookup(struct rhashtable *ht)
key);
return -EEXIST;
} else if (expected && obj) {
- if (obj->ptr != TEST_PTR || obj->value != i) {
- pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
- obj->ptr, TEST_PTR, obj->value, i);
+ if (obj->value != i) {
+ pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
+ obj->value, i);
return -EINVAL;
}
}
@@ -77,129 +92,147 @@ static int __init test_rht_lookup(struct rhashtable *ht)
return 0;
}
-static void test_bucket_stats(struct rhashtable *ht, bool quiet)
+static void test_bucket_stats(struct rhashtable *ht)
{
- unsigned int cnt, rcu_cnt, i, total = 0;
+ unsigned int err, total = 0, chain_len = 0;
+ struct rhashtable_iter hti;
struct rhash_head *pos;
- struct test_obj *obj;
- struct bucket_table *tbl;
- tbl = rht_dereference_rcu(ht->tbl, ht);
- for (i = 0; i < tbl->size; i++) {
- rcu_cnt = cnt = 0;
+ err = rhashtable_walk_init(ht, &hti);
+ if (err) {
+ pr_warn("Test failed: allocation error");
+ return;
+ }
- if (!quiet)
- pr_info(" [%#4x/%u]", i, tbl->size);
+ err = rhashtable_walk_start(&hti);
+ if (err && err != -EAGAIN) {
+ pr_warn("Test failed: iterator failed: %d\n", err);
+ return;
+ }
- rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
- cnt++;
- total++;
- if (!quiet)
- pr_cont(" [%p],", obj);
+ while ((pos = rhashtable_walk_next(&hti))) {
+ if (PTR_ERR(pos) == -EAGAIN) {
+ pr_info("Info: encountered resize\n");
+ chain_len++;
+ continue;
+ } else if (IS_ERR(pos)) {
+ pr_warn("Test failed: rhashtable_walk_next() error: %ld\n",
+ PTR_ERR(pos));
+ break;
}
- rht_for_each_entry_rcu(obj, pos, tbl, i, node)
- rcu_cnt++;
-
- if (rcu_cnt != cnt)
- pr_warn("Test failed: Chain count mismach %d != %d",
- cnt, rcu_cnt);
-
- if (!quiet)
- pr_cont("\n [%#x] first element: %p, chain length: %u\n",
- i, tbl->buckets[i], cnt);
+ total++;
}
- pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n",
- total, atomic_read(&ht->nelems), TEST_ENTRIES);
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+
+ pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",
+ total, atomic_read(&ht->nelems), entries, chain_len);
- if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
+ if (total != atomic_read(&ht->nelems) || total != entries)
pr_warn("Test failed: Total count mismatch ^^^");
}
-static int __init test_rhashtable(struct rhashtable *ht)
+static s64 __init test_rhashtable(struct rhashtable *ht)
{
- struct bucket_table *tbl;
struct test_obj *obj;
- struct rhash_head *pos, *next;
int err;
- unsigned int i;
+ unsigned int i, insert_fails = 0;
+ s64 start, end;
/*
* Insertion Test:
- * Insert TEST_ENTRIES into table with all keys even numbers
+ * Insert entries into table with all keys even numbers
*/
- pr_info(" Adding %d keys\n", TEST_ENTRIES);
- for (i = 0; i < TEST_ENTRIES; i++) {
- struct test_obj *obj;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj) {
- err = -ENOMEM;
- goto error;
- }
+ pr_info(" Adding %d keys\n", entries);
+ start = ktime_get_ns();
+ for (i = 0; i < entries; i++) {
+ struct test_obj *obj = &array[i];
- obj->ptr = TEST_PTR;
obj->value = i * 2;
err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
- if (err) {
- kfree(obj);
- goto error;
+ if (err == -ENOMEM || err == -EBUSY) {
+ /* Mark failed inserts but continue */
+ obj->value = TEST_INSERT_FAIL;
+ insert_fails++;
+ } else if (err) {
+ return err;
}
}
+ if (insert_fails)
+ pr_info(" %u insertions failed due to memory pressure\n",
+ insert_fails);
+
+ test_bucket_stats(ht);
rcu_read_lock();
- test_bucket_stats(ht, true);
test_rht_lookup(ht);
rcu_read_unlock();
- rcu_read_lock();
- test_bucket_stats(ht, true);
- rcu_read_unlock();
+ test_bucket_stats(ht);
- pr_info(" Deleting %d keys\n", TEST_ENTRIES);
- for (i = 0; i < TEST_ENTRIES; i++) {
+ pr_info(" Deleting %d keys\n", entries);
+ for (i = 0; i < entries; i++) {
u32 key = i * 2;
- obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
- BUG_ON(!obj);
+ if (array[i].value != TEST_INSERT_FAIL) {
+ obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
+ BUG_ON(!obj);
- rhashtable_remove_fast(ht, &obj->node, test_rht_params);
- kfree(obj);
+ rhashtable_remove_fast(ht, &obj->node, test_rht_params);
+ }
}
- return 0;
-
-error:
- tbl = rht_dereference_rcu(ht->tbl, ht);
- for (i = 0; i < tbl->size; i++)
- rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
- kfree(obj);
+ end = ktime_get_ns();
+ pr_info(" Duration of test: %lld ns\n", end - start);
- return err;
+ return end - start;
}
static struct rhashtable ht;
static int __init test_rht_init(void)
{
- int err;
+ int i, err;
+ u64 total_time = 0;
- pr_info("Running resizable hashtable tests...\n");
+ entries = min(entries, MAX_ENTRIES);
- err = rhashtable_init(&ht, &test_rht_params);
- if (err < 0) {
- pr_warn("Test failed: Unable to initialize hashtable: %d\n",
- err);
- return err;
- }
+ test_rht_params.automatic_shrinking = shrinking;
+ test_rht_params.max_size = max_size;
+ test_rht_params.nelem_hint = size;
- err = test_rhashtable(&ht);
+ pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
+ size, max_size, shrinking);
- rhashtable_destroy(&ht);
+ for (i = 0; i < runs; i++) {
+ s64 time;
- return err;
+ pr_info("Test %02d:\n", i);
+ memset(&array, 0, sizeof(array));
+ err = rhashtable_init(&ht, &test_rht_params);
+ if (err < 0) {
+ pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+ err);
+ continue;
+ }
+
+ time = test_rhashtable(&ht);
+ rhashtable_destroy(&ht);
+ if (time < 0) {
+ pr_warn("Test failed: return code %lld\n", time);
+ return -EINVAL;
+ }
+
+ total_time += time;
+ }
+
+ do_div(total_time, runs);
+ pr_info("Average test time: %llu\n", total_time);
+
+ return 0;
}
static void __exit test_rht_exit(void)
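The rewritten test_bucket_stats() above doubles as a reference for the
rhashtable walking API: init the iterator, start the walk, loop over
rhashtable_walk_next() until it returns NULL, then stop and exit, treating
-EAGAIN at any step as "a resize happened, keep going". A minimal sketch of
that pattern (entry handling elided):

	static void walk_all(struct rhashtable *ht)
	{
		struct rhashtable_iter hti;
		struct rhash_head *pos;
		int err;

		err = rhashtable_walk_init(ht, &hti);
		if (err)
			return;

		err = rhashtable_walk_start(&hti);
		if (err && err != -EAGAIN)	/* -EAGAIN: resize ran, walk still valid */
			goto out;

		while ((pos = rhashtable_walk_next(&hti))) {
			if (PTR_ERR(pos) == -EAGAIN)
				continue;	/* iterator hopped to the resized table */
			if (IS_ERR(pos))
				break;		/* real error, stop walking */
			/* ... inspect the entry behind 'pos' ... */
		}

		rhashtable_walk_stop(&hti);
	out:
		rhashtable_walk_exit(&hti);
	}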
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ebffa0e4a9c0..2fd31aebef30 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2967,6 +2967,104 @@ void free_pages(unsigned long addr, unsigned int order)
EXPORT_SYMBOL(free_pages);
/*
+ * Page Fragment:
+ * An arbitrary-length arbitrary-offset area of memory which resides
+ * within a 0 or higher order page. Multiple fragments within that page
+ * are individually refcounted, in the page's reference counter.
+ *
+ * The page_frag functions below provide a simple allocation framework for
+ * page fragments. This is used by the network stack and network device
+ * drivers to provide a backing region of memory for use as either an
+ * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
+ */
+static struct page *__page_frag_refill(struct page_frag_cache *nc,
+ gfp_t gfp_mask)
+{
+ struct page *page = NULL;
+ gfp_t gfp = gfp_mask;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+ gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
+ __GFP_NOMEMALLOC;
+ page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+ PAGE_FRAG_CACHE_MAX_ORDER);
+ nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
+#endif
+ if (unlikely(!page))
+ page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+
+ nc->va = page ? page_address(page) : NULL;
+
+ return page;
+}
+
+void *__alloc_page_frag(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
+{
+ unsigned int size = PAGE_SIZE;
+ struct page *page;
+ int offset;
+
+ if (unlikely(!nc->va)) {
+refill:
+ page = __page_frag_refill(nc, gfp_mask);
+ if (!page)
+ return NULL;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+ /* if size can vary use size else just use PAGE_SIZE */
+ size = nc->size;
+#endif
+ /* Even if we own the page, we do not use atomic_set().
+ * This would break get_page_unless_zero() users.
+ */
+ atomic_add(size - 1, &page->_count);
+
+ /* reset page count bias and offset to start of new frag */
+ nc->pfmemalloc = page->pfmemalloc;
+ nc->pagecnt_bias = size;
+ nc->offset = size;
+ }
+
+ offset = nc->offset - fragsz;
+ if (unlikely(offset < 0)) {
+ page = virt_to_page(nc->va);
+
+ if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
+ goto refill;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+ /* if size can vary use size else just use PAGE_SIZE */
+ size = nc->size;
+#endif
+ /* OK, page count is 0, we can safely set it */
+ atomic_set(&page->_count, size);
+
+ /* reset page count bias and offset to start of new frag */
+ nc->pagecnt_bias = size;
+ offset = size - fragsz;
+ }
+
+ nc->pagecnt_bias--;
+ nc->offset = offset;
+
+ return nc->va + offset;
+}
+EXPORT_SYMBOL(__alloc_page_frag);
+
+/*
+ * Frees a page fragment allocated out of either a compound or order 0 page.
+ */
+void __free_page_frag(void *addr)
+{
+ struct page *page = virt_to_head_page(addr);
+
+ if (unlikely(put_page_testzero(page)))
+ __free_pages_ok(page, compound_order(page));
+}
+EXPORT_SYMBOL(__free_page_frag);
+
+/*
* alloc_kmem_pages charges newly allocated pages to the kmem resource counter
* of the current memory cgroup.
*
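The page-fragment allocator added above is consumed through just the two
exported entry points. A minimal sketch of a caller; in practice the cache
lives per CPU or per NAPI context rather than as a single global:

	static struct page_frag_cache frag_cache;	/* zeroed, so va == NULL */

	static void *grab_buffer(void)
	{
		/* carve a 256-byte fragment out of the cached page */
		return __alloc_page_frag(&frag_cache, 256, GFP_ATOMIC);
	}

	static void drop_buffer(void *buf)
	{
		/* drop one fragment ref; the page is freed when all are gone */
		__free_page_frag(buf);
	}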
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 59555f0f8fc8..d2cd9de4b724 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -618,6 +618,92 @@ out:
return err;
}
+static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ struct sk_buff *p, **pp = NULL;
+ struct vlan_hdr *vhdr;
+ unsigned int hlen, off_vlan;
+ const struct packet_offload *ptype;
+ __be16 type;
+ int flush = 1;
+
+ off_vlan = skb_gro_offset(skb);
+ hlen = off_vlan + sizeof(*vhdr);
+ vhdr = skb_gro_header_fast(skb, off_vlan);
+ if (skb_gro_header_hard(skb, hlen)) {
+ vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
+ if (unlikely(!vhdr))
+ goto out;
+ }
+
+ type = vhdr->h_vlan_encapsulated_proto;
+
+ rcu_read_lock();
+ ptype = gro_find_receive_by_type(type);
+ if (!ptype)
+ goto out_unlock;
+
+ flush = 0;
+
+ for (p = *head; p; p = p->next) {
+ struct vlan_hdr *vhdr2;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
+ if (compare_vlan_header(vhdr, vhdr2))
+ NAPI_GRO_CB(p)->same_flow = 0;
+ }
+
+ skb_gro_pull(skb, sizeof(*vhdr));
+ skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
+ pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
+ __be16 type = vhdr->h_vlan_encapsulated_proto;
+ struct packet_offload *ptype;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ ptype = gro_find_complete_by_type(type);
+ if (ptype)
+ err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
+
+ rcu_read_unlock();
+ return err;
+}
+
+static struct packet_offload vlan_packet_offloads[] __read_mostly = {
+ {
+ .type = cpu_to_be16(ETH_P_8021Q),
+ .priority = 10,
+ .callbacks = {
+ .gro_receive = vlan_gro_receive,
+ .gro_complete = vlan_gro_complete,
+ },
+ },
+ {
+ .type = cpu_to_be16(ETH_P_8021AD),
+ .priority = 10,
+ .callbacks = {
+ .gro_receive = vlan_gro_receive,
+ .gro_complete = vlan_gro_complete,
+ },
+ },
+};
+
static int __net_init vlan_init_net(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
@@ -645,6 +731,7 @@ static struct pernet_operations vlan_net_ops = {
static int __init vlan_proto_init(void)
{
int err;
+ unsigned int i;
pr_info("%s v%s\n", vlan_fullname, vlan_version);
@@ -668,6 +755,9 @@ static int __init vlan_proto_init(void)
if (err < 0)
goto err5;
+ for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
+ dev_add_offload(&vlan_packet_offloads[i]);
+
vlan_ioctl_set(vlan_ioctl_handler);
return 0;
@@ -685,7 +775,13 @@ err0:
static void __exit vlan_cleanup_module(void)
{
+ unsigned int i;
+
vlan_ioctl_set(NULL);
+
+ for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
+ dev_remove_offload(&vlan_packet_offloads[i]);
+
vlan_netlink_fini();
unregister_netdevice_notifier(&vlan_notifier_block);
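The VLAN GRO hooks plug into the generic offload list via dev_add_offload(),
and the same pattern extends to any ethertype. A sketch for a hypothetical
protocol (the foo_* names and the experimental ethertype are invented;
.priority orders the entry in the offload list just as for the VLAN entries
above):

	static struct packet_offload foo_offload __read_mostly = {
		.type = cpu_to_be16(0x88b5),	/* IEEE 802 experimental ethertype */
		.priority = 10,
		.callbacks = {
			.gro_receive	= foo_gro_receive,
			.gro_complete	= foo_gro_complete,
		},
	};

	static int __init foo_init(void)
	{
		dev_add_offload(&foo_offload);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		dev_remove_offload(&foo_offload);
	}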
diff --git a/net/Kconfig b/net/Kconfig
index 44dd5786ee91..57a7c5af3175 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -45,6 +45,9 @@ config COMPAT_NETLINK_MESSAGES
Newly written code should NEVER need this option but do
compat-independent messages instead!
+config NET_INGRESS
+ bool
+
menu "Networking options"
source "net/packet/Kconfig"
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 3b7ad43c7dad..d5871ac493eb 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1030,7 +1030,7 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol,
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
goto out;
rc = -ENOMEM;
- sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto);
+ sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern);
if (!sk)
goto out;
rc = 0;
diff --git a/net/atm/common.c b/net/atm/common.c
index ed0466637e13..49a872db7e42 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -141,7 +141,7 @@ static struct proto vcc_proto = {
.release_cb = vcc_release_cb,
};
-int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
+int vcc_create(struct net *net, struct socket *sock, int protocol, int family, int kern)
{
struct sock *sk;
struct atm_vcc *vcc;
@@ -149,7 +149,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
sock->sk = NULL;
if (sock->type == SOCK_STREAM)
return -EINVAL;
- sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto);
+ sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto, kern);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
diff --git a/net/atm/common.h b/net/atm/common.h
index 4d6f5b2068ac..959436b87182 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -10,7 +10,7 @@
#include <linux/poll.h> /* for poll_table */
-int vcc_create(struct net *net, struct socket *sock, int protocol, int family);
+int vcc_create(struct net *net, struct socket *sock, int protocol, int family, int kern);
int vcc_release(struct socket *sock);
int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index ae0324021407..040207ec399f 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -136,7 +136,7 @@ static int pvc_create(struct net *net, struct socket *sock, int protocol,
return -EAFNOSUPPORT;
sock->ops = &pvc_proto_ops;
- return vcc_create(net, sock, protocol, PF_ATMPVC);
+ return vcc_create(net, sock, protocol, PF_ATMPVC, kern);
}
static const struct net_proto_family pvc_family_ops = {
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 1ba23f5018e7..3fa0a9ee98d1 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -660,7 +660,7 @@ static int svc_create(struct net *net, struct socket *sock, int protocol,
return -EAFNOSUPPORT;
sock->ops = &svc_proto_ops;
- error = vcc_create(net, sock, protocol, AF_ATMSVC);
+ error = vcc_create(net, sock, protocol, AF_ATMSVC, kern);
if (error)
return error;
ATM_SD(sock)->local.sas_family = AF_ATMSVC;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 330c1f4a5a0b..4273533d22b1 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -855,7 +855,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
return -ESOCKTNOSUPPORT;
}
- sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto);
+ sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, kern);
if (sk == NULL)
return -ENOMEM;
@@ -881,7 +881,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
struct sock *sk;
ax25_cb *ax25, *oax25;
- sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, osk->sk_prot);
+ sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, osk->sk_prot, 0);
if (sk == NULL)
return NULL;
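All of the protocol-family hunks in this patch apply one mechanical rule:
sk_alloc() grew a trailing 'kern' flag, so a family's create() handler
forwards the kern argument it already receives from the socket core, while
sockets cloned from an existing one (as in ax25_make_new() above) pass 0. In
sketch form, with PF_FOO/foo_proto standing in for a real family:

	static int foo_create(struct net *net, struct socket *sock, int protocol,
			      int kern)
	{
		struct sock *sk;

		/* forward 'kern' so kernel-internal sockets can be told apart */
		sk = sk_alloc(net, PF_FOO, GFP_KERNEL, &foo_proto, kern);
		if (!sk)
			return -ENOMEM;

		sock_init_data(sock, sk);
		return 0;
	}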
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index eb7d8c0388e4..21434ab79d2c 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
#
# Marek Lindner, Simon Wunderlich
#
@@ -20,7 +20,7 @@ obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
batman-adv-y += bat_iv_ogm.o
batman-adv-y += bitarray.o
batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
-batman-adv-y += debugfs.o
+batman-adv-$(CONFIG_DEBUG_FS) += debugfs.o
batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
batman-adv-y += fragmentation.o
batman-adv-y += gateway_client.o
@@ -29,6 +29,7 @@ batman-adv-y += hard-interface.o
batman-adv-y += hash.o
batman-adv-y += icmp_socket.o
batman-adv-y += main.o
+batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o
batman-adv-y += originator.o
batman-adv-y += routing.o
@@ -36,4 +37,3 @@ batman-adv-y += send.o
batman-adv-y += soft-interface.o
batman-adv-y += sysfs.o
batman-adv-y += translation-table.o
-batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 4e49666f8c65..4e59cf3eb079 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 00e00e09b000..753383c2215c 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -15,20 +15,50 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "bat_algo.h"
#include "main.h"
-#include "translation-table.h"
+
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/cache.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/pkt_sched.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "bitarray.h"
+#include "hard-interface.h"
+#include "hash.h"
+#include "network-coding.h"
#include "originator.h"
+#include "packet.h"
#include "routing.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "hard-interface.h"
#include "send.h"
-#include "bat_algo.h"
-#include "network-coding.h"
+#include "translation-table.h"
/**
* enum batadv_dup_status - duplicate status
- * @BATADV_NO_DUP: the packet is a duplicate
+ * @BATADV_NO_DUP: the packet is no duplicate
* @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
* neighbor)
* @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
@@ -55,7 +85,7 @@ static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
}
/**
- * batadv_ring_buffer_set - compute the average of all non-zero values stored
+ * batadv_ring_buffer_avg - compute the average of all non-zero values stored
* in the given ring buffer
* @lq_recv: pointer to the ring buffer
*
@@ -64,7 +94,9 @@ static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
{
const uint8_t *ptr;
- uint16_t count = 0, i = 0, sum = 0;
+ uint16_t count = 0;
+ uint16_t i = 0;
+ uint16_t sum = 0;
ptr = lq_recv;
@@ -308,7 +340,6 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
struct batadv_ogm_packet *batadv_ogm_packet;
unsigned char *ogm_buff;
uint32_t random_seqno;
- int res = -ENOMEM;
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
@@ -317,7 +348,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
if (!ogm_buff)
- goto out;
+ return -ENOMEM;
hard_iface->bat_iv.ogm_buff = ogm_buff;
@@ -329,10 +360,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
batadv_ogm_packet->reserved = 0;
batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
- res = 0;
-
-out:
- return res;
+ return 0;
}
static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
@@ -396,8 +424,8 @@ static uint8_t batadv_hop_penalty(uint8_t tq,
}
/* is there another aggregated packet here? */
-static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
- __be16 tvlv_len)
+static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+ __be16 tvlv_len)
{
int next_buff_pos = 0;
@@ -413,7 +441,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
struct batadv_hard_iface *hard_iface)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- char *fwd_str;
+ const char *fwd_str;
uint8_t packet_num;
int16_t buff_pos;
struct batadv_ogm_packet *batadv_ogm_packet;
@@ -451,7 +479,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
batadv_ogm_packet->orig,
ntohl(batadv_ogm_packet->seqno),
batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
- (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
+ ((batadv_ogm_packet->flags & BATADV_DIRECTLINK) ?
"on" : "off"),
hard_iface->net_dev->name,
hard_iface->net_dev->dev_addr);
@@ -548,58 +576,62 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
* - the send time is within our MAX_AGGREGATION_MS time
* - the resulting packet won't be bigger than
* MAX_AGGREGATION_BYTES
+ * otherwise aggregation is not possible
*/
- if (time_before(send_time, forw_packet->send_time) &&
- time_after_eq(aggregation_end_time, forw_packet->send_time) &&
- (aggregated_bytes <= BATADV_MAX_AGGREGATION_BYTES)) {
- /* check aggregation compatibility
- * -> direct link packets are broadcasted on
- * their interface only
- * -> aggregate packet if the current packet is
- * a "global" packet as well as the base
- * packet
- */
- primary_if = batadv_primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- /* packet is not leaving on the same interface. */
- if (forw_packet->if_outgoing != if_outgoing)
- goto out;
+ if (!time_before(send_time, forw_packet->send_time) ||
+ !time_after_eq(aggregation_end_time, forw_packet->send_time))
+ return false;
+
+ if (aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES)
+ return false;
+
+ /* packet is not leaving on the same interface. */
+ if (forw_packet->if_outgoing != if_outgoing)
+ return false;
+
+ /* check aggregation compatibility
+ * -> direct link packets are broadcasted on
+ * their interface only
+ * -> aggregate packet if the current packet is
+ * a "global" packet as well as the base
+ * packet
+ */
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ return false;
- /* packets without direct link flag and high TTL
- * are flooded through the net
- */
- if ((!directlink) &&
- (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
- (batadv_ogm_packet->ttl != 1) &&
-
- /* own packets originating non-primary
- * interfaces leave only that interface
- */
- ((!forw_packet->own) ||
- (forw_packet->if_incoming == primary_if))) {
- res = true;
- goto out;
- }
+ /* packets without direct link flag and high TTL
+ * are flooded through the net
+ */
+ if (!directlink &&
+ !(batadv_ogm_packet->flags & BATADV_DIRECTLINK) &&
+ batadv_ogm_packet->ttl != 1 &&
+
+ /* own packets originating non-primary
+ * interfaces leave only that interface
+ */
+ (!forw_packet->own ||
+ forw_packet->if_incoming == primary_if)) {
+ res = true;
+ goto out;
+ }
- /* if the incoming packet is sent via this one
- * interface only - we still can aggregate
- */
- if ((directlink) &&
- (new_bat_ogm_packet->ttl == 1) &&
- (forw_packet->if_incoming == if_incoming) &&
-
- /* packets from direct neighbors or
- * own secondary interface packets
- * (= secondary interface packets in general)
- */
- (batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
- (forw_packet->own &&
- forw_packet->if_incoming != primary_if))) {
- res = true;
- goto out;
- }
+ /* if the incoming packet is sent via this one
+ * interface only - we still can aggregate
+ */
+ if (directlink &&
+ new_bat_ogm_packet->ttl == 1 &&
+ forw_packet->if_incoming == if_incoming &&
+
+ /* packets from direct neighbors or
+ * own secondary interface packets
+ * (= secondary interface packets in general)
+ */
+ (batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
+ (forw_packet->own &&
+ forw_packet->if_incoming != primary_if))) {
+ res = true;
+ goto out;
}
out:
@@ -642,19 +674,16 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"batman packet queue full\n");
- goto out;
+ goto out_free_outgoing;
}
}
forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
- if (!forw_packet_aggr) {
- if (!own_packet)
- atomic_inc(&bat_priv->batman_queue_left);
- goto out;
- }
+ if (!forw_packet_aggr)
+ goto out_nomem;
- if ((atomic_read(&bat_priv->aggregated_ogms)) &&
- (packet_len < BATADV_MAX_AGGREGATION_BYTES))
+ if (atomic_read(&bat_priv->aggregated_ogms) &&
+ packet_len < BATADV_MAX_AGGREGATION_BYTES)
skb_size = BATADV_MAX_AGGREGATION_BYTES;
else
skb_size = packet_len;
@@ -662,12 +691,8 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
skb_size += ETH_HLEN;
forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size);
- if (!forw_packet_aggr->skb) {
- if (!own_packet)
- atomic_inc(&bat_priv->batman_queue_left);
- kfree(forw_packet_aggr);
- goto out;
- }
+ if (!forw_packet_aggr->skb)
+ goto out_free_forw_packet;
forw_packet_aggr->skb->priority = TC_PRIO_CONTROL;
skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
@@ -699,7 +724,12 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
send_time - jiffies);
return;
-out:
+out_free_forw_packet:
+ kfree(forw_packet_aggr);
+out_nomem:
+ if (!own_packet)
+ atomic_inc(&bat_priv->batman_queue_left);
+out_free_outgoing:
batadv_hardif_free_ref(if_outgoing);
out_free_incoming:
batadv_hardif_free_ref(if_incoming);
@@ -752,13 +782,13 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
unsigned long max_aggregation_jiffies;
batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
- direct_link = batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0;
+ direct_link = !!(batadv_ogm_packet->flags & BATADV_DIRECTLINK);
max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
/* find position for the packet in the forward queue */
spin_lock_bh(&bat_priv->forw_bat_list_lock);
/* own packets are not to be aggregated */
- if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
+ if (atomic_read(&bat_priv->aggregated_ogms) && !own_packet) {
hlist_for_each_entry(forw_packet_pos,
&bat_priv->forw_bat_list, list) {
if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
@@ -1034,9 +1064,10 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
batadv_orig_node_free_ref(orig_tmp);
if (!neigh_node)
goto unlock;
- } else
+ } else {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Updating existing last-hop neighbor of originator\n");
+ }
rcu_read_unlock();
neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
@@ -1081,7 +1112,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
* won't consider it either
*/
if (router_ifinfo &&
- (neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg)) {
+ neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) {
orig_node_tmp = router->orig_node;
spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
if_num = router->if_incoming->if_num;
@@ -1356,8 +1387,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
out:
spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
batadv_orig_node_free_ref(orig_node);
- if (orig_ifinfo)
- batadv_orig_ifinfo_free_ref(orig_ifinfo);
+ batadv_orig_ifinfo_free_ref(orig_ifinfo);
return ret;
}
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index e3da07a64026..cf68c328345e 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -15,10 +15,10 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
#include "bitarray.h"
+#include "main.h"
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
/* shift the packet array by n places. */
static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 2acaafe60188..0c2456225fae 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -18,6 +18,12 @@
#ifndef _NET_BATMAN_ADV_BITARRAY_H_
#define _NET_BATMAN_ADV_BITARRAY_H_
+#include "main.h"
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+
/* Returns 1 if the corresponding bit in the given seq_bits indicates true
* and curr_seqno is within range of last_seqno. Otherwise returns 0.
*/
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ac4b96eccade..ba0609292ae7 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -15,19 +15,41 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
-#include "hash.h"
-#include "hard-interface.h"
-#include "originator.h"
#include "bridge_loop_avoidance.h"
-#include "translation-table.h"
-#include "send.h"
+#include "main.h"
-#include <linux/etherdevice.h>
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
#include <linux/crc16.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
#include <linux/if_arp.h>
-#include <net/arp.h>
+#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <net/arp.h>
+
+#include "hard-interface.h"
+#include "hash.h"
+#include "originator.h"
+#include "packet.h"
+#include "translation-table.h"
static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
@@ -42,12 +64,8 @@ static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
uint32_t hash = 0;
- hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
- hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
-
- hash += (hash << 3);
- hash ^= (hash >> 11);
- hash += (hash << 15);
+ hash = jhash(&claim->addr, sizeof(claim->addr), hash);
+ hash = jhash(&claim->vid, sizeof(claim->vid), hash);
return hash % size;
}
@@ -59,12 +77,8 @@ static inline uint32_t batadv_choose_backbone_gw(const void *data,
const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
uint32_t hash = 0;
- hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
- hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
-
- hash += (hash << 3);
- hash ^= (hash >> 11);
- hash += (hash << 15);
+ hash = jhash(&claim->addr, sizeof(claim->addr), hash);
+ hash = jhash(&claim->vid, sizeof(claim->vid), hash);
return hash % size;
}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 43c985d92c3e..0282690389ac 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -18,6 +18,16 @@
#ifndef _NET_BATMAN_ADV_BLA_H_
#define _NET_BATMAN_ADV_BLA_H_
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_hard_iface;
+struct batadv_orig_node;
+struct batadv_priv;
+struct seq_file;
+struct sk_buff;
+
#ifdef CONFIG_BATMAN_ADV_BLA
int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid, bool is_bcast);
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index a4972874c056..c4c1e8030ba0 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -15,21 +15,42 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "debugfs.h"
#include "main.h"
+#include <linux/compiler.h>
#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/poll.h>
+#include <linux/printk.h>
+#include <linux/sched.h> /* for linux/wait.h */
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <stdarg.h>
-#include "debugfs.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "soft-interface.h"
-#include "icmp_socket.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "icmp_socket.h"
#include "network-coding.h"
+#include "originator.h"
+#include "translation-table.h"
static struct dentry *batadv_debugfs;
@@ -482,11 +503,7 @@ rem_attr:
debugfs_remove_recursive(hard_iface->debug_dir);
hard_iface->debug_dir = NULL;
out:
-#ifdef CONFIG_DEBUG_FS
return -ENOMEM;
-#else
- return 0;
-#endif /* CONFIG_DEBUG_FS */
}
/**
@@ -541,11 +558,7 @@ rem_attr:
debugfs_remove_recursive(bat_priv->debug_dir);
bat_priv->debug_dir = NULL;
out:
-#ifdef CONFIG_DEBUG_FS
return -ENOMEM;
-#else
- return 0;
-#endif /* CONFIG_DEBUG_FS */
}
void batadv_debugfs_del_meshif(struct net_device *dev)
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 37c4d6ddd04d..187acdc85dfa 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -18,8 +18,17 @@
#ifndef _NET_BATMAN_ADV_DEBUGFS_H_
#define _NET_BATMAN_ADV_DEBUGFS_H_
+#include "main.h"
+
+#include <linux/kconfig.h>
+
+struct batadv_hard_iface;
+struct net_device;
+
#define BATADV_DEBUGFS_SUBDIR "batman_adv"
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
void batadv_debugfs_init(void);
void batadv_debugfs_destroy(void);
int batadv_debugfs_add_meshif(struct net_device *dev);
@@ -27,4 +36,36 @@ void batadv_debugfs_del_meshif(struct net_device *dev);
int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
+#else
+
+static inline void batadv_debugfs_init(void)
+{
+}
+
+static inline void batadv_debugfs_destroy(void)
+{
+}
+
+static inline int batadv_debugfs_add_meshif(struct net_device *dev)
+{
+ return 0;
+}
+
+static inline void batadv_debugfs_del_meshif(struct net_device *dev)
+{
+}
+
+static inline
+int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
+{
+ return 0;
+}
+
+static inline
+void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
+{
+}
+
+#endif
+
#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index aad022dd15df..fb54e6aed096 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
@@ -15,18 +15,36 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/if_ether.h>
+#include "distributed-arp-table.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
#include <linux/if_arp.h>
+#include <linux/if_ether.h>
#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
#include <net/arp.h>
-#include "main.h"
-#include "hash.h"
-#include "distributed-arp-table.h"
#include "hard-interface.h"
+#include "hash.h"
#include "originator.h"
#include "send.h"
-#include "types.h"
#include "translation-table.h"
static void batadv_dat_purge(struct work_struct *work);
@@ -206,9 +224,22 @@ static uint32_t batadv_hash_dat(const void *data, uint32_t size)
{
uint32_t hash = 0;
const struct batadv_dat_entry *dat = data;
+ const unsigned char *key;
+ uint32_t i;
- hash = batadv_hash_bytes(hash, &dat->ip, sizeof(dat->ip));
- hash = batadv_hash_bytes(hash, &dat->vid, sizeof(dat->vid));
+ key = (const unsigned char *)&dat->ip;
+ for (i = 0; i < sizeof(dat->ip); i++) {
+ hash += key[i];
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ key = (const unsigned char *)&dat->vid;
+ for (i = 0; i < sizeof(dat->vid); i++) {
+ hash += key[i];
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
hash += (hash << 3);
hash ^= (hash >> 11);
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index 2fe0764c64be..3181507ebc14 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
@@ -18,12 +18,19 @@
#ifndef _NET_BATMAN_ADV_DISTRIBUTED_ARP_TABLE_H_
#define _NET_BATMAN_ADV_DISTRIBUTED_ARP_TABLE_H_
-#ifdef CONFIG_BATMAN_ADV_DAT
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
-#include "types.h"
#include "originator.h"
+#include "packet.h"
-#include <linux/if_arp.h>
+struct seq_file;
+struct sk_buff;
+
+#ifdef CONFIG_BATMAN_ADV_DAT
/* BATADV_DAT_ADDR_MAX - maximum address value in the DHT space */
#define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 3d1dcaa3e8b5..c0f0d01ab244 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll <martin@hundeboll.net>
*
@@ -15,12 +15,28 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
#include "fragmentation.h"
-#include "send.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/pkt_sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include "hard-interface.h"
#include "originator.h"
+#include "packet.h"
#include "routing.h"
-#include "hard-interface.h"
+#include "send.h"
#include "soft-interface.h"
/**
@@ -161,6 +177,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
hlist_add_head(&frag_entry_new->list, &chain->head);
chain->size = skb->len - hdr_size;
chain->timestamp = jiffies;
+ chain->total_size = ntohs(frag_packet->total_size);
ret = true;
goto out;
}
@@ -195,9 +212,11 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
out:
if (chain->size > batadv_frag_size_limit() ||
- ntohs(frag_packet->total_size) > batadv_frag_size_limit()) {
+ chain->total_size != ntohs(frag_packet->total_size) ||
+ chain->total_size > batadv_frag_size_limit()) {
/* Clear chain if total size of either the list or the packet
- * exceeds the maximum size of one merged packet.
+ * exceeds the maximum size of one merged packet. Don't allow
+ * packets to have different total_size.
*/
batadv_frag_clear_chain(&chain->head);
chain->size = 0;
@@ -228,19 +247,13 @@ err:
* Returns the merged skb or NULL on error.
*/
static struct sk_buff *
-batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
+batadv_frag_merge_packets(struct hlist_head *chain)
{
struct batadv_frag_packet *packet;
struct batadv_frag_list_entry *entry;
struct sk_buff *skb_out = NULL;
int size, hdr_size = sizeof(struct batadv_frag_packet);
- /* Make sure incoming skb has non-bogus data. */
- packet = (struct batadv_frag_packet *)skb->data;
- size = ntohs(packet->total_size);
- if (size > batadv_frag_size_limit())
- goto free;
-
/* Remove first entry, as this is the destination for the rest of the
* fragments.
*/
@@ -249,6 +262,9 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
skb_out = entry->skb;
kfree(entry);
+ packet = (struct batadv_frag_packet *)skb_out->data;
+ size = ntohs(packet->total_size);
+
/* Make room for the rest of the fragments. */
if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
kfree_skb(skb_out);
@@ -304,7 +320,7 @@ bool batadv_frag_skb_buffer(struct sk_buff **skb,
if (hlist_empty(&head))
goto out;
- skb_out = batadv_frag_merge_packets(&head, *skb);
+ skb_out = batadv_frag_merge_packets(&head);
if (!skb_out)
goto out_err;
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
index d848cf6676a2..8b9877e70b95 100644
--- a/net/batman-adv/fragmentation.h
+++ b/net/batman-adv/fragmentation.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll <martin@hundeboll.net>
*
@@ -18,6 +18,15 @@
#ifndef _NET_BATMAN_ADV_FRAGMENTATION_H_
#define _NET_BATMAN_ADV_FRAGMENTATION_H_
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct sk_buff;
+
void batadv_frag_purge_orig(struct batadv_orig_node *orig,
bool (*check_cb)(struct batadv_frag_table_entry *));
bool batadv_frag_skb_fwd(struct sk_buff *skb,
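
fragmentation.h (and the other headers reworked below) now includes only what it uses directly and forward-declares types such as struct sk_buff that it handles purely through pointers. A hedged sketch of the pattern, with hypothetical names and kernel-style guards:

/* Hedged sketch of a header relying on a forward declaration:
 * a pointer parameter needs only an incomplete type.
 */
#ifndef _EXAMPLE_H_
#define _EXAMPLE_H_

#include <linux/types.h>        /* bool, u16 */

struct sk_buff;                 /* incomplete type: no <linux/skbuff.h> needed */

bool example_buffer_skb(struct sk_buff *skb, u16 seqno);
/* Dereferencing skb or taking sizeof(*skb) here would not compile. */

#endif /* _EXAMPLE_H_ */
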
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 090828cf1fa7..bb0158620628 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -15,18 +15,38 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
-#include "sysfs.h"
#include "gateway_client.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/udp.h>
+
#include "gateway_common.h"
#include "hard-interface.h"
#include "originator.h"
-#include "translation-table.h"
+#include "packet.h"
#include "routing.h"
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/udp.h>
-#include <linux/if_vlan.h>
+#include "sysfs.h"
+#include "translation-table.h"
/* These are the offsets of the "hw type" and "hw address length" in the dhcp
* packet starting at the beginning of the dhcp header
@@ -733,11 +753,6 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
return BATADV_DHCP_NO;
- /* skb->data might have been reallocated by pskb_may_pull() */
- ethhdr = eth_hdr(skb);
- if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
- ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
-
udphdr = (struct udphdr *)(skb->data + *header_len);
*header_len += sizeof(*udphdr);
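
The deleted gateway_client.c lines re-derived ethhdr because pskb_may_pull() may reallocate skb->data, invalidating cached header pointers; they can go only because ethhdr is no longer used past this point in the function. A minimal sketch of the rule (kernel context assumed, hypothetical function name):

/* Hedged sketch: re-derive pointers into skb->data after
 * pskb_may_pull(), which may reallocate the linear header.
 */
#include <linux/skbuff.h>
#include <linux/udp.h>

static int example_pull_udp(struct sk_buff *skb, unsigned int header_len)
{
        struct udphdr *uh;

        if (!pskb_may_pull(skb, header_len + sizeof(*uh)))
                return -EINVAL;

        /* Only now is it safe to point into the (possibly moved) data. */
        uh = (struct udphdr *)(skb->data + header_len);

        return ntohs(uh->len);
}
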
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 7ee53bb7d50f..89565b451c18 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -18,6 +18,14 @@
#ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
#define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_tvlv_gateway_data;
+struct seq_file;
+struct sk_buff;
+
void batadv_gw_check_client_stop(struct batadv_priv *bat_priv);
void batadv_gw_reselect(struct batadv_priv *bat_priv);
void batadv_gw_election(struct batadv_priv *bat_priv);
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 88a1bc3804d1..39cf44ccebd4 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -15,9 +15,18 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
#include "gateway_common.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+
#include "gateway_client.h"
+#include "packet.h"
/**
* batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index aa5116561947..bd5c812cebf4 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -18,6 +18,13 @@
#ifndef _NET_BATMAN_ADV_GATEWAY_COMMON_H_
#define _NET_BATMAN_ADV_GATEWAY_COMMON_H_
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_priv;
+struct net_device;
+
enum batadv_gw_modes {
BATADV_GW_MODE_OFF,
BATADV_GW_MODE_CLIENT,
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index baf1f9843f2c..f4a15d2e5eaf 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -15,22 +15,36 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
-#include "distributed-arp-table.h"
#include "hard-interface.h"
-#include "soft-interface.h"
-#include "send.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "sysfs.h"
-#include "debugfs.h"
-#include "originator.h"
-#include "hash.h"
-#include "bridge_loop_avoidance.h"
-#include "gateway_client.h"
+#include "main.h"
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
+#include <linux/if.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <net/net_namespace.h>
+
+#include "bridge_loop_avoidance.h"
+#include "debugfs.h"
+#include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "originator.h"
+#include "packet.h"
+#include "send.h"
+#include "soft-interface.h"
+#include "sysfs.h"
+#include "translation-table.h"
void batadv_hardif_free_rcu(struct rcu_head *rcu)
{
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 1918cd50b62e..5a31420513e1 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -18,6 +18,17 @@
#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
#define _NET_BATMAN_ADV_HARD_INTERFACE_H_
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/notifier.h>
+#include <linux/rcupdate.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct net_device;
+
enum batadv_hard_if_state {
BATADV_IF_NOT_IN_USE,
BATADV_IF_TO_BE_REMOVED,
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 7c1c63080e20..e89f3146b092 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -15,8 +15,12 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
#include "hash.h"
+#include "main.h"
+
+#include <linux/fs.h>
+#include <linux/lockdep.h>
+#include <linux/slab.h>
/* clears the hash */
static void batadv_hash_init(struct batadv_hashtable *hash)
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 539fc1266793..5065f50c9c3c 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -18,7 +18,16 @@
#ifndef _NET_BATMAN_ADV_HASH_H_
#define _NET_BATMAN_ADV_HASH_H_
+#include "main.h"
+
+#include <linux/compiler.h>
#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct lock_class_key;
/* callback to a compare function. should compare 2 element datas for their
* keys, return 0 if same and not 0 if not same
@@ -80,28 +89,6 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash,
}
/**
- * batadv_hash_bytes - hash some bytes and add them to the previous hash
- * @hash: previous hash value
- * @data: data to be hashed
- * @size: number of bytes to be hashed
- *
- * Returns the new hash value.
- */
-static inline uint32_t batadv_hash_bytes(uint32_t hash, const void *data,
- uint32_t size)
-{
- const unsigned char *key = data;
- int i;
-
- for (i = 0; i < size; i++) {
- hash += key[i];
- hash += (hash << 10);
- hash ^= (hash >> 6);
- }
- return hash;
-}
-
-/**
* batadv_hash_add - adds data to the hashtable
* @hash: storage hash table
* @compare: callback to determine if 2 hash elements are identical
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 161ef8f17d2e..07061bcbaa04 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -15,14 +15,39 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "icmp_socket.h"
#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/export.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pkt_sched.h>
+#include <linux/poll.h>
+#include <linux/printk.h>
+#include <linux/sched.h> /* for linux/wait.h */
+#include <linux/skbuff.h>
#include <linux/slab.h>
-#include "icmp_socket.h"
-#include "send.h"
-#include "hash.h"
-#include "originator.h"
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+
#include "hard-interface.h"
+#include "originator.h"
+#include "packet.h"
+#include "send.h"
static struct batadv_socket_client *batadv_socket_client_hash[256];
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 0c33950aa4aa..7de7fce4b48c 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -18,6 +18,13 @@
#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
#define _NET_BATMAN_ADV_ICMP_SOCKET_H_
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_icmp_header;
+struct batadv_priv;
+
#define BATADV_ICMP_SOCKET "socket"
void batadv_socket_init(void);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 12fc77bef23f..8457097f1643 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -15,31 +15,53 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
#include <linux/crc32c.h>
-#include <linux/highmem.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
#include <linux/if_vlan.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/pkt_sched.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
#include <net/dsfield.h>
-#include "main.h"
-#include "sysfs.h"
+#include <net/rtnetlink.h>
+
+#include "bat_algo.h"
+#include "bridge_loop_avoidance.h"
#include "debugfs.h"
+#include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "gateway_common.h"
+#include "hard-interface.h"
+#include "icmp_socket.h"
+#include "multicast.h"
+#include "network-coding.h"
+#include "originator.h"
+#include "packet.h"
#include "routing.h"
#include "send.h"
-#include "originator.h"
#include "soft-interface.h"
-#include "icmp_socket.h"
#include "translation-table.h"
-#include "hard-interface.h"
-#include "gateway_client.h"
-#include "bridge_loop_avoidance.h"
-#include "distributed-arp-table.h"
-#include "multicast.h"
-#include "gateway_common.h"
-#include "hash.h"
-#include "bat_algo.h"
-#include "network-coding.h"
-#include "fragmentation.h"
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
* list traversals just rcu-locked
@@ -209,10 +231,13 @@ void batadv_mesh_free(struct net_device *soft_iface)
* interfaces in the current mesh
* @bat_priv: the bat priv with all the soft interface information
* @addr: the address to check
+ *
+ * Returns 'true' if the mac address was found, false otherwise.
*/
-int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
{
const struct batadv_hard_iface *hard_iface;
+ bool is_my_mac = false;
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -223,12 +248,12 @@ int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
continue;
if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
- rcu_read_unlock();
- return 1;
+ is_my_mac = true;
+ break;
}
}
rcu_read_unlock();
- return 0;
+ return is_my_mac;
}
/**
@@ -510,14 +535,12 @@ static struct batadv_algo_ops *batadv_algo_get(char *name)
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
struct batadv_algo_ops *bat_algo_ops_tmp;
- int ret;
bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
if (bat_algo_ops_tmp) {
pr_info("Trying to register already registered routing algorithm: %s\n",
bat_algo_ops->name);
- ret = -EEXIST;
- goto out;
+ return -EEXIST;
}
/* all algorithms must implement all ops (for now) */
@@ -531,32 +554,26 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
!bat_algo_ops->bat_neigh_is_equiv_or_better) {
pr_info("Routing algo '%s' does not implement required ops\n",
bat_algo_ops->name);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
INIT_HLIST_NODE(&bat_algo_ops->list);
hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
- ret = 0;
-out:
- return ret;
+ return 0;
}
int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
struct batadv_algo_ops *bat_algo_ops;
- int ret = -EINVAL;
bat_algo_ops = batadv_algo_get(name);
if (!bat_algo_ops)
- goto out;
+ return -EINVAL;
bat_priv->bat_algo_ops = bat_algo_ops;
- ret = 0;
-out:
- return ret;
+ return 0;
}
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
@@ -819,15 +836,15 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
/* keep old buffer if kmalloc should fail */
- if (new_buff) {
- memcpy(new_buff, *packet_buff, min_packet_len);
- kfree(*packet_buff);
- *packet_buff = new_buff;
- *packet_buff_len = min_packet_len + additional_packet_len;
- return true;
- }
+ if (!new_buff)
+ return false;
+
+ memcpy(new_buff, *packet_buff, min_packet_len);
+ kfree(*packet_buff);
+ *packet_buff = new_buff;
+ *packet_buff_len = min_packet_len + additional_packet_len;
- return false;
+ return true;
}
/**
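
The main.c changes above are control-flow cleanups: batadv_is_my_mac() becomes bool and funnels through a single rcu_read_unlock(), batadv_algo_register()/batadv_algo_select() drop their ret/goto ladders in favor of direct returns, and batadv_tvlv_realloc_packet_buff() is inverted to fail early. A hedged sketch of the single-exit RCU lookup, with a hypothetical interface list:

/* Hedged sketch: one rcu_read_unlock() on a single exit path,
 * instead of unlocking inside the loop before an early return.
 * Hypothetical example_iface; kernel context assumed.
 */
#include <linux/etherdevice.h>
#include <linux/rculist.h>

struct example_iface {
        struct list_head list;
        u8 addr[ETH_ALEN];
};

static bool example_is_my_mac(struct list_head *ifaces, const u8 *addr)
{
        struct example_iface *iface;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(iface, ifaces, list) {
                if (ether_addr_equal_unaligned(iface->addr, addr)) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}
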
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 4d2318829a34..41d27c7872b9 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -24,7 +24,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2015.0"
+#define BATADV_SOURCE_VERSION "2015.1"
#endif
/* B.A.T.M.A.N. parameters */
@@ -44,7 +44,7 @@
#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
#define BATADV_TT_WORK_PERIOD 5000 /* 5 seconds */
#define BATADV_ORIG_WORK_PERIOD 1000 /* 1 second */
-#define BATADV_DAT_ENTRY_TIMEOUT (5*60000) /* 5 mins in milliseconds */
+#define BATADV_DAT_ENTRY_TIMEOUT (5 * 60000) /* 5 mins in milliseconds */
/* sliding packet range of received originator messages in sequence numbers
* (should be a multiple of our word size)
*/
@@ -163,28 +163,26 @@ enum batadv_uev_type {
/* Kernel headers */
-#include <linux/mutex.h> /* mutex */
-#include <linux/module.h> /* needed by all modules */
-#include <linux/netdevice.h> /* netdevice */
-#include <linux/etherdevice.h> /* ethernet address classification */
-#include <linux/if_ether.h> /* ethernet header */
-#include <linux/poll.h> /* poll_table */
-#include <linux/kthread.h> /* kernel threads */
-#include <linux/pkt_sched.h> /* schedule types */
-#include <linux/workqueue.h> /* workqueue */
+#include <linux/atomic.h>
+#include <linux/bitops.h> /* for packet.h */
+#include <linux/compiler.h>
+#include <linux/cpumask.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h> /* for packet.h */
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/types.h>
#include <linux/percpu.h>
-#include <linux/slab.h>
-#include <net/sock.h> /* struct sock */
-#include <net/addrconf.h> /* ipv6 address stuff */
-#include <linux/ip.h>
-#include <net/rtnetlink.h>
#include <linux/jiffies.h>
-#include <linux/seq_file.h>
#include <linux/if_vlan.h>
#include "types.h"
-#define BATADV_PRINT_VID(vid) (vid & BATADV_VLAN_HAS_TAG ? \
+struct batadv_ogm_packet;
+struct seq_file;
+struct sk_buff;
+
+#define BATADV_PRINT_VID(vid) ((vid & BATADV_VLAN_HAS_TAG) ? \
(int)(vid & VLAN_VID_MASK) : -1)
extern char batadv_routing_algo[];
@@ -195,7 +193,7 @@ extern struct workqueue_struct *batadv_event_workqueue;
int batadv_mesh_init(struct net_device *soft_iface);
void batadv_mesh_free(struct net_device *soft_iface);
-int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq);
int batadv_max_header_len(void);
@@ -279,7 +277,7 @@ static inline void _batadv_dbg(int type __always_unused,
*
* note: can't use ether_addr_equal() as it requires aligned memory
*/
-static inline int batadv_compare_eth(const void *data1, const void *data2)
+static inline bool batadv_compare_eth(const void *data1, const void *data2)
{
return ether_addr_equal_unaligned(data1, data2);
}
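
The BATADV_PRINT_VID tweak parenthesizes the flag test; '&' already binds tighter than '?:', so this is a readability and checkpatch cleanup rather than a behavior change (the same grouping is applied to the flag ternaries in translation-table.c below). The hazard the convention guards against sits one operator away, as this standalone example shows:

/* Hedged sketch: why kernel style parenthesizes bitmask tests.
 * '&' does bind tighter than '?:', but it binds looser than the
 * comparison operators, which is where the real trap lives.
 */
#include <stdio.h>

int main(void)
{
        int flags = 0x4;

        /* '==' binds tighter than '&': tests flags & (0x4 == 0x4). */
        printf("%d\n", flags & 0x4 == 0x4);     /* prints 0: wrong */
        printf("%d\n", (flags & 0x4) == 0x4);   /* prints 1: intended */
        return 0;
}
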
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index b24e4bb64fb5..7aa480b7edd0 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors:
*
* Linus Lüssing
*
@@ -15,10 +15,33 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
#include "multicast.h"
-#include "originator.h"
-#include "hard-interface.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+
+#include "packet.h"
#include "translation-table.h"
/**
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 3a44ebdb43cb..beb6e56c624a 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors:
*
* Linus Lüssing
*
@@ -18,6 +18,12 @@
#ifndef _NET_BATMAN_ADV_MULTICAST_H_
#define _NET_BATMAN_ADV_MULTICAST_H_
+#include "main.h"
+
+struct batadv_orig_node;
+struct batadv_priv;
+struct sk_buff;
+
/**
* batadv_forw_mode - the way a packet should be forwarded as
* @BATADV_FORW_ALL: forward the packet to all nodes (currently via classic
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 127cc4d7380a..f0a50f31d822 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll, Jeppe Ledet-Pedersen
*
@@ -15,15 +15,44 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "network-coding.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/init.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
-#include "main.h"
+#include "hard-interface.h"
#include "hash.h"
-#include "network-coding.h"
-#include "send.h"
#include "originator.h"
-#include "hard-interface.h"
+#include "packet.h"
#include "routing.h"
+#include "send.h"
static struct lock_class_key batadv_nc_coding_hash_lock_class_key;
static struct lock_class_key batadv_nc_decoding_hash_lock_class_key;
@@ -155,7 +184,7 @@ err:
*/
void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
{
- atomic_set(&bat_priv->network_coding, 1);
+ atomic_set(&bat_priv->network_coding, 0);
bat_priv->nc.min_tq = 200;
bat_priv->nc.max_fwd_delay = 10;
bat_priv->nc.max_buffer_time = 200;
@@ -275,7 +304,7 @@ static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
* max_buffer time
*/
return batadv_has_timed_out(nc_path->last_valid,
- bat_priv->nc.max_buffer_time*10);
+ bat_priv->nc.max_buffer_time * 10);
}
/**
@@ -453,14 +482,8 @@ static uint32_t batadv_nc_hash_choose(const void *data, uint32_t size)
const struct batadv_nc_path *nc_path = data;
uint32_t hash = 0;
- hash = batadv_hash_bytes(hash, &nc_path->prev_hop,
- sizeof(nc_path->prev_hop));
- hash = batadv_hash_bytes(hash, &nc_path->next_hop,
- sizeof(nc_path->next_hop));
-
- hash += (hash << 3);
- hash ^= (hash >> 11);
- hash += (hash << 15);
+ hash = jhash(&nc_path->prev_hop, sizeof(nc_path->prev_hop), hash);
+ hash = jhash(&nc_path->next_hop, sizeof(nc_path->next_hop), hash);
return hash % size;
}
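
network-coding.c (like originator.h and translation-table.c below) replaces the open-coded Jenkins one-at-a-time loop, whose batadv_hash_bytes() helper was removed from hash.h above, with the kernel's jhash(), chaining fields by feeding the running hash back in as the initval. A minimal sketch over two fixed-size fields (hypothetical struct; kernel context assumed):

/* Hedged sketch: chaining jhash() across struct fields by passing
 * the previous result as initval, then reducing to a bucket index.
 */
#include <linux/jhash.h>

struct example_path {
        u8 prev_hop[6];
        u8 next_hop[6];
};

static u32 example_hash_choose(const struct example_path *p, u32 buckets)
{
        u32 hash = 0;

        hash = jhash(p->prev_hop, sizeof(p->prev_hop), hash);
        hash = jhash(p->next_hop, sizeof(p->next_hop), hash);

        return hash % buckets;
}
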
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 358c0d686ab0..5b79aa8c64c1 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll, Jeppe Ledet-Pedersen
*
@@ -18,6 +18,19 @@
#ifndef _NET_BATMAN_ADV_NETWORK_CODING_H_
#define _NET_BATMAN_ADV_NETWORK_CODING_H_
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_nc_node;
+struct batadv_neigh_node;
+struct batadv_ogm_packet;
+struct batadv_orig_node;
+struct batadv_priv;
+struct net_device;
+struct seq_file;
+struct sk_buff;
+
#ifdef CONFIG_BATMAN_ADV_NC
void batadv_nc_status_update(struct net_device *net_dev);
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 90e805aba379..018b7495ad84 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -15,19 +15,31 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "originator.h"
#include "main.h"
+
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
#include "distributed-arp-table.h"
-#include "originator.h"
-#include "hash.h"
-#include "translation-table.h"
-#include "routing.h"
+#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
-#include "soft-interface.h"
-#include "bridge_loop_avoidance.h"
-#include "network-coding.h"
-#include "fragmentation.h"
+#include "hash.h"
#include "multicast.h"
+#include "network-coding.h"
+#include "routing.h"
+#include "translation-table.h"
/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;
@@ -197,13 +209,19 @@ static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node;
struct batadv_neigh_ifinfo *neigh_ifinfo;
+ struct batadv_algo_ops *bao;
neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
+ bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
&neigh_node->ifinfo_list, list) {
batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
}
+
+ if (bao->bat_neigh_free)
+ bao->bat_neigh_free(neigh_node);
+
batadv_hardif_free_ref_now(neigh_node->if_incoming);
kfree(neigh_node);
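
originator.c now gives the routing algorithm a chance to release per-neighbor state from the RCU free path; the hook is optional, so it is NULL-checked before the call (the matching bat_neigh_free member lands in batadv_algo_ops in types.h below). A standalone sketch of the optional-destructor pattern, with hypothetical names:

/* Hedged sketch: optional per-object destructor supplied by a
 * pluggable ops structure, NULL-checked before invocation.
 */
#include <stdlib.h>

struct neigh;

struct algo_ops {
        void (*neigh_free)(struct neigh *n);    /* optional hook */
};

struct neigh {
        const struct algo_ops *ops;
        void *algo_priv;        /* state owned by the algorithm */
};

static void neigh_release(struct neigh *n)
{
        if (n->ops->neigh_free)         /* hook may be unset */
                n->ops->neigh_free(n);
        free(n);
}
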
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index aa4a43696295..79734d302010 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -18,8 +18,21 @@
#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
#define _NET_BATMAN_ADV_ORIGINATOR_H_
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/if_ether.h>
+#include <linux/jhash.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
#include "hash.h"
+struct seq_file;
+
int batadv_compare_orig(const struct hlist_node *node, const void *data2);
int batadv_originator_init(struct batadv_priv *bat_priv);
void batadv_originator_free(struct batadv_priv *bat_priv);
@@ -75,20 +88,9 @@ void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan);
*/
static inline uint32_t batadv_choose_orig(const void *data, uint32_t size)
{
- const unsigned char *key = data;
uint32_t hash = 0;
- size_t i;
-
- for (i = 0; i < 6; i++) {
- hash += key[i];
- hash += (hash << 10);
- hash ^= (hash >> 6);
- }
-
- hash += (hash << 3);
- hash ^= (hash >> 11);
- hash += (hash << 15);
+ hash = jhash(data, ETH_ALEN, hash);
return hash % size;
}
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index b81fbbf21a63..9e747c08d0bc 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -18,6 +18,9 @@
#ifndef _NET_BATMAN_ADV_PACKET_H_
#define _NET_BATMAN_ADV_PACKET_H_
+#include <asm/byteorder.h>
+#include <linux/types.h>
+
/**
* enum batadv_packettype - types for batman-adv encapsulated packets
* @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index da83982bf974..c360c0cd19c2 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -15,20 +15,36 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
#include "routing.h"
-#include "send.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "icmp_socket.h"
-#include "translation-table.h"
-#include "originator.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+
+#include "bitarray.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
-#include "network-coding.h"
#include "fragmentation.h"
-
-#include <linux/if_vlan.h>
+#include "hard-interface.h"
+#include "icmp_socket.h"
+#include "network-coding.h"
+#include "originator.h"
+#include "packet.h"
+#include "send.h"
+#include "soft-interface.h"
+#include "translation-table.h"
static int batadv_route_unicast_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 557d3d12a9ab..6bc29d33abc1 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -18,6 +18,16 @@
#ifndef _NET_BATMAN_ADV_ROUTING_H_
#define _NET_BATMAN_ADV_ROUTING_H_
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_hard_iface;
+struct batadv_neigh_node;
+struct batadv_orig_node;
+struct batadv_priv;
+struct sk_buff;
+
bool batadv_check_management_packet(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface,
int header_len);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 3d64ed20c393..0a01992e65ab 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -15,19 +15,37 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "send.h"
#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/if.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/workqueue.h>
+
#include "distributed-arp-table.h"
-#include "send.h"
-#include "routing.h"
-#include "translation-table.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "gateway_common.h"
+#include "fragmentation.h"
#include "gateway_client.h"
-#include "originator.h"
+#include "hard-interface.h"
#include "network-coding.h"
-#include "fragmentation.h"
-#include "multicast.h"
+#include "originator.h"
+#include "routing.h"
+#include "soft-interface.h"
+#include "translation-table.h"
static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
@@ -255,8 +273,8 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
unsigned short vid)
{
- struct ethhdr *ethhdr;
struct batadv_unicast_packet *unicast_packet;
+ struct ethhdr *ethhdr;
int ret = NET_XMIT_DROP;
if (!orig_node)
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 38d0ec1833ae..0536835fe503 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -18,6 +18,19 @@
#ifndef _NET_BATMAN_ADV_SEND_H_
#define _NET_BATMAN_ADV_SEND_H_
+#include "main.h"
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "packet.h"
+
+struct batadv_hard_iface;
+struct batadv_orig_node;
+struct batadv_priv;
+struct sk_buff;
+struct work_struct;
+
int batadv_send_skb_packet(struct sk_buff *skb,
struct batadv_hard_iface *hard_iface,
const uint8_t *dst_addr);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 5ec31d7de24f..c002961da75d 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -15,26 +15,50 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
#include "soft-interface.h"
-#include "hard-interface.h"
-#include "distributed-arp-table.h"
-#include "routing.h"
-#include "send.h"
-#include "debugfs.h"
-#include "translation-table.h"
-#include "hash.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "sysfs.h"
-#include "originator.h"
-#include <linux/slab.h>
-#include <linux/ethtool.h>
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/byteorder/generic.h>
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
#include <linux/if_vlan.h>
-#include "multicast.h"
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include "bridge_loop_avoidance.h"
+#include "debugfs.h"
+#include "distributed-arp-table.h"
+#include "gateway_client.h"
+#include "gateway_common.h"
+#include "hard-interface.h"
+#include "multicast.h"
#include "network-coding.h"
+#include "packet.h"
+#include "send.h"
+#include "sysfs.h"
+#include "translation-table.h"
static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static void batadv_get_drvinfo(struct net_device *dev,
@@ -105,6 +129,7 @@ static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
{
struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_softif_vlan *vlan;
struct sockaddr *addr = p;
uint8_t old_addr[ETH_ALEN];
@@ -115,12 +140,17 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
ether_addr_copy(dev->dev_addr, addr->sa_data);
/* only modify transtable if it has been initialized before */
- if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
- batadv_tt_local_remove(bat_priv, old_addr, BATADV_NO_FLAGS,
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+ return 0;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+ batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
"mac address changed", false);
- batadv_tt_local_add(dev, addr->sa_data, BATADV_NO_FLAGS,
+ batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
BATADV_NULL_IFINDEX, BATADV_NO_MARK);
}
+ rcu_read_unlock();
return 0;
}
@@ -732,7 +762,7 @@ static int batadv_softif_init_late(struct net_device *dev)
atomic_set(&bat_priv->aggregated_ogms, 1);
atomic_set(&bat_priv->bonding, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
- atomic_set(&bat_priv->bridge_loop_avoidance, 0);
+ atomic_set(&bat_priv->bridge_loop_avoidance, 1);
#endif
#ifdef CONFIG_BATMAN_ADV_DAT
atomic_set(&bat_priv->distributed_arp_table, 1);
@@ -818,7 +848,7 @@ static int batadv_softif_slave_add(struct net_device *dev,
int ret = -EINVAL;
hard_iface = batadv_hardif_get_by_netdev(slave_dev);
- if (!hard_iface || hard_iface->soft_iface != NULL)
+ if (!hard_iface || hard_iface->soft_iface)
goto out;
ret = batadv_hardif_enable_interface(hard_iface, dev->name);
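
The soft-interface.c MAC-change hunk used to touch only the untagged translation-table entry; after inverting the mesh-state check into an early return, it now walks softif_vlan_list under the RCU read lock and re-announces the address on every configured VLAN. A hedged sketch of iterating such a list (hypothetical types; writers are assumed to mutate the list under their own lock):

/* Hedged sketch: apply an update to every VLAN entry while holding
 * only the RCU read lock; readers see a consistent snapshot.
 */
#include <linux/rculist.h>

struct example_vlan {
        struct hlist_node list;
        unsigned short vid;
};

static void example_for_each_vlan(struct hlist_head *vlans,
                                  void (*apply)(unsigned short vid))
{
        struct example_vlan *vlan;

        rcu_read_lock();
        hlist_for_each_entry_rcu(vlan, vlans, list)
                apply(vlan->vid);
        rcu_read_unlock();
}
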
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index dbab22fd89a5..578e8a663c30 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -18,6 +18,17 @@
#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
+#include "main.h"
+
+#include <net/rtnetlink.h>
+
+struct batadv_hard_iface;
+struct batadv_orig_node;
+struct batadv_priv;
+struct batadv_softif_vlan;
+struct net_device;
+struct sk_buff;
+
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
void batadv_interface_rx(struct net_device *soft_iface,
struct sk_buff *skb, struct batadv_hard_iface *recv_if,
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index a75dc12f96f8..d6a312a82c03 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -15,16 +15,35 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
#include "sysfs.h"
-#include "translation-table.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/stringify.h>
+
#include "distributed-arp-table.h"
-#include "network-coding.h"
-#include "originator.h"
+#include "gateway_client.h"
+#include "gateway_common.h"
#include "hard-interface.h"
+#include "network-coding.h"
+#include "packet.h"
#include "soft-interface.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
{
@@ -151,7 +170,7 @@ ssize_t batadv_show_##_name(struct kobject *kobj, \
static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
batadv_store_##_name)
-#define BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
+#define BATADV_ATTR_SIF_STORE_UINT(_name, _var, _min, _max, _post_func) \
ssize_t batadv_store_##_name(struct kobject *kobj, \
struct attribute *attr, char *buff, \
size_t count) \
@@ -161,24 +180,24 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
\
return __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
- &bat_priv->_name, net_dev); \
+ &bat_priv->_var, net_dev); \
}
-#define BATADV_ATTR_SIF_SHOW_UINT(_name) \
+#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
ssize_t batadv_show_##_name(struct kobject *kobj, \
struct attribute *attr, char *buff) \
{ \
struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
\
- return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
+ return sprintf(buff, "%i\n", atomic_read(&bat_priv->_var)); \
} \
/* Use this, if you are going to set [name] in the soft-interface
* (bat_priv) to an unsigned integer value
*/
-#define BATADV_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
- static BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func)\
- static BATADV_ATTR_SIF_SHOW_UINT(_name) \
+#define BATADV_ATTR_SIF_UINT(_name, _var, _mode, _min, _max, _post_func)\
+ static BATADV_ATTR_SIF_STORE_UINT(_name, _var, _min, _max, _post_func)\
+ static BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
batadv_store_##_name)
@@ -540,19 +559,20 @@ BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu);
static BATADV_ATTR(routing_algo, S_IRUGO, batadv_show_bat_algo, NULL);
static BATADV_ATTR(gw_mode, S_IRUGO | S_IWUSR, batadv_show_gw_mode,
batadv_store_gw_mode);
-BATADV_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * BATADV_JITTER,
- INT_MAX, NULL);
-BATADV_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, BATADV_TQ_MAX_VALUE,
- NULL);
-BATADV_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, BATADV_TQ_MAX_VALUE,
- batadv_post_gw_reselect);
+BATADV_ATTR_SIF_UINT(orig_interval, orig_interval, S_IRUGO | S_IWUSR,
+ 2 * BATADV_JITTER, INT_MAX, NULL);
+BATADV_ATTR_SIF_UINT(hop_penalty, hop_penalty, S_IRUGO | S_IWUSR, 0,
+ BATADV_TQ_MAX_VALUE, NULL);
+BATADV_ATTR_SIF_UINT(gw_sel_class, gw_sel_class, S_IRUGO | S_IWUSR, 1,
+ BATADV_TQ_MAX_VALUE, batadv_post_gw_reselect);
static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
batadv_store_gw_bwidth);
#ifdef CONFIG_BATMAN_ADV_MCAST
BATADV_ATTR_SIF_BOOL(multicast_mode, S_IRUGO | S_IWUSR, NULL);
#endif
#ifdef CONFIG_BATMAN_ADV_DEBUG
-BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
+BATADV_ATTR_SIF_UINT(log_level, log_level, S_IRUGO | S_IWUSR, 0,
+ BATADV_DBG_ALL, NULL);
#endif
#ifdef CONFIG_BATMAN_ADV_NC
BATADV_ATTR_SIF_BOOL(network_coding, S_IRUGO | S_IWUSR,
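
The sysfs macro family gains a separate _var parameter so an attribute's user-visible file name no longer has to match the struct batadv_priv member backing it; the callers converted above simply pass the same token twice. A standalone sketch of the two-token idea:

/* Hedged sketch: decoupling an attribute's visible name from the
 * member it reads, via separate macro parameters.
 */
#include <stdio.h>

struct priv {
        int orig_interval;
};

#define SHOW_UINT(_name, _var)                                  \
static int show_##_name(const struct priv *p)                   \
{                                                               \
        return printf(#_name ": %d\n", p->_var);                \
}

SHOW_UINT(orig_interval, orig_interval) /* name == member here */

int main(void)
{
        struct priv p = { .orig_interval = 1000 };

        return show_orig_interval(&p) > 0 ? 0 : 1;
}
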
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index b715b60db7cd..2294583f7cf9 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -18,6 +18,16 @@
#ifndef _NET_BATMAN_ADV_SYSFS_H_
#define _NET_BATMAN_ADV_SYSFS_H_
+#include "main.h"
+
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+struct batadv_priv;
+struct batadv_softif_vlan;
+struct kobject;
+struct net_device;
+
#define BATADV_SYSFS_IF_MESH_SUBDIR "mesh"
#define BATADV_SYSFS_IF_BAT_SUBDIR "batman_adv"
/**
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 07b263a437d1..b4824951010b 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
@@ -15,18 +15,41 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "main.h"
#include "translation-table.h"
-#include "soft-interface.h"
+#include "main.h"
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/byteorder/generic.h>
+#include <linux/compiler.h>
+#include <linux/crc32c.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <net/net_namespace.h>
+
+#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
-#include "send.h"
#include "hash.h"
-#include "originator.h"
-#include "routing.h"
-#include "bridge_loop_avoidance.h"
#include "multicast.h"
-
-#include <linux/crc32c.h>
+#include "originator.h"
+#include "packet.h"
+#include "soft-interface.h"
/* hash class keys */
static struct lock_class_key batadv_tt_local_hash_lock_class_key;
@@ -67,12 +90,8 @@ static inline uint32_t batadv_choose_tt(const void *data, uint32_t size)
uint32_t hash = 0;
tt = (struct batadv_tt_common_entry *)data;
- hash = batadv_hash_bytes(hash, &tt->addr, ETH_ALEN);
- hash = batadv_hash_bytes(hash, &tt->vid, sizeof(tt->vid));
-
- hash += (hash << 3);
- hash ^= (hash >> 11);
- hash += (hash << 15);
+ hash = jhash(&tt->addr, ETH_ALEN, hash);
+ hash = jhash(&tt->vid, sizeof(tt->vid), hash);
return hash % size;
}
@@ -954,17 +973,17 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
" * %pM %4i [%c%c%c%c%c%c] %3u.%03u (%#.8x)\n",
tt_common_entry->addr,
BATADV_PRINT_VID(tt_common_entry->vid),
- (tt_common_entry->flags &
- BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
+ ((tt_common_entry->flags &
+ BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
no_purge ? 'P' : '.',
- (tt_common_entry->flags &
- BATADV_TT_CLIENT_NEW ? 'N' : '.'),
- (tt_common_entry->flags &
- BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
- (tt_common_entry->flags &
- BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
- (tt_common_entry->flags &
- BATADV_TT_CLIENT_ISOLA ? 'I' : '.'),
+ ((tt_common_entry->flags &
+ BATADV_TT_CLIENT_NEW) ? 'N' : '.'),
+ ((tt_common_entry->flags &
+ BATADV_TT_CLIENT_PENDING) ? 'X' : '.'),
+ ((tt_common_entry->flags &
+ BATADV_TT_CLIENT_WIFI) ? 'W' : '.'),
+ ((tt_common_entry->flags &
+ BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
no_purge ? 0 : last_seen_secs,
no_purge ? 0 : last_seen_msecs,
vlan->tt.crc);
@@ -1528,10 +1547,10 @@ batadv_tt_global_print_entry(struct batadv_priv *bat_priv,
BATADV_PRINT_VID(tt_global_entry->common.vid),
best_entry->ttvn, best_entry->orig_node->orig,
last_ttvn, vlan->tt.crc,
- (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
- (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
- (flags & BATADV_TT_CLIENT_ISOLA ? 'I' : '.'),
- (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+ ((flags & BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
+ ((flags & BATADV_TT_CLIENT_WIFI) ? 'W' : '.'),
+ ((flags & BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
+ ((flags & BATADV_TT_CLIENT_TEMP) ? 'T' : '.'));
batadv_orig_node_vlan_free_ref(vlan);
}
@@ -1560,10 +1579,10 @@ print_list:
BATADV_PRINT_VID(tt_global_entry->common.vid),
orig_entry->ttvn, orig_entry->orig_node->orig,
last_ttvn, vlan->tt.crc,
- (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
- (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
- (flags & BATADV_TT_CLIENT_ISOLA ? 'I' : '.'),
- (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+ ((flags & BATADV_TT_CLIENT_ROAM) ? 'R' : '.'),
+ ((flags & BATADV_TT_CLIENT_WIFI) ? 'W' : '.'),
+ ((flags & BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
+ ((flags & BATADV_TT_CLIENT_TEMP) ? 'T' : '.'));
batadv_orig_node_vlan_free_ref(vlan);
}
@@ -2529,7 +2548,7 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
req_src, tt_data->ttvn, req_dst,
- (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+ ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
/* Let's get the orig node of the REAL destination */
req_dst_orig_node = batadv_orig_hash_find(bat_priv, req_dst);
@@ -2660,7 +2679,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
req_src, tt_data->ttvn,
- (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+ ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
spin_lock_bh(&bat_priv->tt.commit_lock);
@@ -2899,7 +2918,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
resp_src, tt_data->ttvn, num_entries,
- (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+ ((tt_data->flags & BATADV_TT_FULL_TABLE) ? 'F' : '.'));
orig_node = batadv_orig_hash_find(bat_priv, resp_src);
if (!orig_node)
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index ad84d7b89e39..6acc25d3a925 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
@@ -18,6 +18,15 @@
#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
+#include "main.h"
+
+#include <linux/types.h>
+
+struct batadv_orig_node;
+struct batadv_priv;
+struct net_device;
+struct seq_file;
+
int batadv_tt_init(struct batadv_priv *bat_priv);
bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
unsigned short vid, int ifindex, uint32_t mark);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 9398c3fb4174..67d63483618e 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -18,9 +18,23 @@
#ifndef _NET_BATMAN_ADV_TYPES_H_
#define _NET_BATMAN_ADV_TYPES_H_
+#ifndef _NET_BATMAN_ADV_MAIN_H_
+#error only "main.h" can be included directly
+#endif
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h> /* for linux/wait.h */
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
#include "packet.h"
-#include "bitarray.h"
-#include <linux/kernel.h>
+
+struct seq_file;
#ifdef CONFIG_BATMAN_ADV_DAT
@@ -132,6 +146,7 @@ struct batadv_orig_ifinfo {
* @timestamp: time (jiffie) of last received fragment
* @seqno: sequence number of the fragments in the list
* @size: accumulated size of packets in list
+ * @total_size: expected size of the assembled packet
*/
struct batadv_frag_table_entry {
struct hlist_head head;
@@ -139,6 +154,7 @@ struct batadv_frag_table_entry {
unsigned long timestamp;
uint16_t seqno;
uint16_t size;
+ uint16_t total_size;
};
/**
@@ -181,9 +197,10 @@ struct batadv_orig_node_vlan {
/**
* struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
- * @bcast_own: bitfield containing the number of our OGMs this orig_node
- * rebroadcasted "back" to us (relative to last_real_seqno)
- * @bcast_own_sum: counted result of bcast_own
+ * @bcast_own: set of bitfields (one per hard interface) where each one counts
+ * the number of our OGMs this orig_node rebroadcasted "back" to us (relative
+ * to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
+ * @bcast_own_sum: sum of bcast_own
* @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
* neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
*/
@@ -1118,6 +1135,8 @@ struct batadv_forw_packet {
* @bat_neigh_is_equiv_or_better: check if neigh1 is equally good or better
* than neigh2 for their respective outgoing interface from the metric
* prospective
+ * @bat_neigh_free: free the resources allocated by the routing algorithm for a
+ * neigh_node object
* @bat_orig_print: print the originator table (optional)
* @bat_orig_free: free the resources allocated by the routing algorithm for an
* orig_node object
@@ -1135,6 +1154,7 @@ struct batadv_algo_ops {
void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
+ /* neigh_node handling API */
int (*bat_neigh_cmp)(struct batadv_neigh_node *neigh1,
struct batadv_hard_iface *if_outgoing1,
struct batadv_neigh_node *neigh2,
@@ -1144,6 +1164,7 @@ struct batadv_algo_ops {
struct batadv_hard_iface *if_outgoing1,
struct batadv_neigh_node *neigh2,
struct batadv_hard_iface *if_outgoing2);
+ void (*bat_neigh_free)(struct batadv_neigh_node *neigh);
/* orig_node handling API */
void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq,
struct batadv_hard_iface *hard_iface);
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 1742b849fcff..f3d6046c8ee7 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -192,7 +192,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
if (ipv6_addr_any(nexthop))
return NULL;
} else {
- nexthop = rt6_nexthop(rt);
+ nexthop = rt6_nexthop(rt, daddr);
/* We need to remember the address because it is needed
* by bt_xmit() when sending the packet. In bt_xmit(), the
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index bde2bdd9e929..b5116fa9835e 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -202,7 +202,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto);
+ sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern);
if (!sk)
return -ENOMEM;
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index d82787d417bd..ce86a7bae844 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -205,7 +205,7 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol,
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto);
+ sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, kern);
if (!sk)
return -ENOMEM;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index c4802f3bd4c5..f6c99098959f 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -94,7 +94,6 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
char buf[32];
size_t buf_size = min(count, (sizeof(buf)-1));
bool enable;
- int err;
if (!test_bit(HCI_UP, &hdev->flags))
return -ENETDOWN;
@@ -121,12 +120,8 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
if (IS_ERR(skb))
return PTR_ERR(skb);
- err = -bt_to_errno(skb->data[0]);
kfree_skb(skb);
- if (err < 0)
- return err;
-
hci_dev_change_flag(hdev, HCI_DUT_MODE);
return count;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 56f9edbf3d05..5b14dcafcd08 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1377,7 +1377,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
sock->ops = &hci_sock_ops;
- sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
+ sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
if (!sk)
return -ENOMEM;
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index cb3fdde1968a..008ba439bd62 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -235,7 +235,7 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto);
+ sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, kern);
if (!sk)
return -ENOMEM;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index a7278f05eafb..244287706f91 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -43,7 +43,7 @@ static struct bt_sock_list l2cap_sk_list = {
static const struct proto_ops l2cap_sock_ops;
static void l2cap_sock_init(struct sock *sk, struct sock *parent);
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
- int proto, gfp_t prio);
+ int proto, gfp_t prio, int kern);
bool l2cap_is_socket(struct socket *sock)
{
@@ -1193,7 +1193,7 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
}
sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
- GFP_ATOMIC);
+ GFP_ATOMIC, 0);
if (!sk) {
release_sock(parent);
return NULL;
@@ -1523,12 +1523,12 @@ static struct proto l2cap_proto = {
};
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
- int proto, gfp_t prio)
+ int proto, gfp_t prio, int kern)
{
struct sock *sk;
struct l2cap_chan *chan;
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
+ sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, kern);
if (!sk)
return NULL;
@@ -1574,7 +1574,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
sock->ops = &l2cap_sock_ops;
- sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+ sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
if (!sk)
return -ENOMEM;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7fd87e7135b5..a6f21f8c2f98 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7577,7 +7577,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
memset(&ev, 0, sizeof(ev));
/* Devices using resolvable or non-resolvable random addresses
- * without providing an indentity resolving key don't require
+ * without providing an identity resolving key don't require
* to store long term keys. Their addresses will change the
* next time around.
*
@@ -7617,7 +7617,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
/* For identity resolving keys from devices that are already
* using a public address or static random address, do not
* ask for storing this key. The identity resolving key really
- * is only mandatory for devices using resovlable random
+ * is only mandatory for devices using resolvable random
* addresses.
*
* Storing all identity resolving keys has the downside that
@@ -7646,7 +7646,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
memset(&ev, 0, sizeof(ev));
/* Devices using resolvable or non-resolvable random addresses
- * without providing an indentity resolving key don't require
+ * without providing an identity resolving key don't require
* to store signature resolving keys. Their addresses will change
* the next time around.
*
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 4fea24275b17..29709fbfd1f5 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -200,7 +200,7 @@ static int rfcomm_l2sock_create(struct socket **sock)
BT_DBG("");
- err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock);
+ err = sock_create_kern(&init_net, PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock);
if (!err) {
struct sock *sk = (*sock)->sk;
sk->sk_data_ready = rfcomm_l2data_ready;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 825e8fb5114b..b2338e971b33 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -269,12 +269,12 @@ static struct proto rfcomm_proto = {
.obj_size = sizeof(struct rfcomm_pinfo)
};
-static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern)
{
struct rfcomm_dlc *d;
struct sock *sk;
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
+ sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, kern);
if (!sk)
return NULL;
@@ -324,7 +324,7 @@ static int rfcomm_sock_create(struct net *net, struct socket *sock,
sock->ops = &rfcomm_sock_ops;
- sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+ sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
if (!sk)
return -ENOMEM;
@@ -969,7 +969,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
goto done;
}
- sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC);
+ sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC, 0);
if (!sk)
goto done;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 4322c833e748..6b6e59dc54cf 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -460,11 +460,11 @@ static struct proto sco_proto = {
.obj_size = sizeof(struct sco_pinfo)
};
-static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern)
{
struct sock *sk;
- sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto);
+ sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern);
if (!sk)
return NULL;
@@ -501,7 +501,7 @@ static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
sock->ops = &sco_sock_ops;
- sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+ sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
if (!sk)
return -ENOMEM;
@@ -1026,7 +1026,7 @@ static void sco_conn_ready(struct sco_conn *conn)
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
- BTPROTO_SCO, GFP_ATOMIC);
+ BTPROTO_SCO, GFP_ATOMIC, 0);
if (!sk) {
bh_unlock_sock(parent);
sco_conn_unlock(conn);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 1ab3dc9c8f99..659371af39e4 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -371,6 +371,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
uint8_t tmp[16], data[16];
int err;
+ SMP_DBG("k %16phN r %16phN", k, r);
+
if (!tfm) {
BT_ERR("tfm %p", tfm);
return -EINVAL;
@@ -400,6 +402,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
/* Most significant octet of encryptedData corresponds to data[0] */
swap_buf(data, r, 16);
+ SMP_DBG("r %16phN", r);
+
return err;
}
@@ -410,6 +414,10 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
u8 p1[16], p2[16];
int err;
+ SMP_DBG("k %16phN r %16phN", k, r);
+ SMP_DBG("iat %u ia %6phN rat %u ra %6phN", _iat, ia, _rat, ra);
+ SMP_DBG("preq %7phN pres %7phN", preq, pres);
+
memset(p1, 0, 16);
/* p1 = pres || preq || _rat || _iat */
@@ -418,10 +426,7 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
memcpy(p1 + 2, preq, 7);
memcpy(p1 + 9, pres, 7);
- /* p2 = padding || ia || ra */
- memcpy(p2, ra, 6);
- memcpy(p2 + 6, ia, 6);
- memset(p2 + 12, 0, 4);
+ SMP_DBG("p1 %16phN", p1);
/* res = r XOR p1 */
u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
@@ -433,6 +438,13 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
return err;
}
+ /* p2 = padding || ia || ra */
+ memcpy(p2, ra, 6);
+ memcpy(p2 + 6, ia, 6);
+ memset(p2 + 12, 0, 4);
+
+ SMP_DBG("p2 %16phN", p2);
+
/* res = res XOR p2 */
u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
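
For context, the c1 confirm-value generation these SMP_DBG lines trace chains two 128-bit blocks through the cipher: p1 = pres || preq || _rat || _iat, p2 = padding || ia || ra, and res = e(k, e(k, r XOR p1) XOR p2); the patch merely moves the p2 assembly after the first encryption and adds tracing. A compilable sketch of the block assembly and XOR steps, where aes_e() is a placeholder for the crypto_blkcipher invocation done by smp_e(), not a real API:

#include <string.h>
#include <stdint.h>

/* placeholder for the AES-128 encryption performed by smp_e() */
static void aes_e(const uint8_t k[16], uint8_t r[16]) { (void)k; (void)r; }

static void xor128(uint8_t *dst, const uint8_t *src)
{
	int i;

	for (i = 0; i < 16; i++)
		dst[i] ^= src[i];
}

/* c1(k, r, preq, pres, iat, rat, ia, ra), laid out LSB-first as above */
static void smp_c1_sketch(const uint8_t k[16], const uint8_t r[16],
			  const uint8_t preq[7], const uint8_t pres[7],
			  uint8_t iat, uint8_t rat,
			  const uint8_t ia[6], const uint8_t ra[6],
			  uint8_t res[16])
{
	uint8_t p1[16], p2[16];

	/* p1 = pres || preq || _rat || _iat */
	p1[0] = iat;
	p1[1] = rat;
	memcpy(p1 + 2, preq, 7);
	memcpy(p1 + 9, pres, 7);

	/* p2 = padding || ia || ra */
	memcpy(p2, ra, 6);
	memcpy(p2 + 6, ia, 6);
	memset(p2 + 12, 0, 4);

	memcpy(res, r, 16);
	xor128(res, p1);	/* res = r XOR p1 */
	aes_e(k, res);		/* res = e(k, res) */
	xor128(res, p2);	/* res = res XOR p2 */
	aes_e(k, res);		/* res = e(k, res) */
}
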
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 02c24cf63c34..a1abe4936fe1 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -121,13 +121,13 @@ static struct notifier_block br_device_notifier = {
.notifier_call = br_device_event
};
-static int br_netdev_switch_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int br_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
- struct net_device *dev = netdev_switch_notifier_info_to_dev(ptr);
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
struct net_bridge_port *p;
struct net_bridge *br;
- struct netdev_switch_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_fdb_info *fdb_info;
int err = NOTIFY_DONE;
rtnl_lock();
@@ -138,14 +138,14 @@ static int br_netdev_switch_event(struct notifier_block *unused,
br = p->br;
switch (event) {
- case NETDEV_SWITCH_FDB_ADD:
+ case SWITCHDEV_FDB_ADD:
fdb_info = ptr;
err = br_fdb_external_learn_add(br, p, fdb_info->addr,
fdb_info->vid);
if (err)
err = notifier_from_errno(err);
break;
- case NETDEV_SWITCH_FDB_DEL:
+ case SWITCHDEV_FDB_DEL:
fdb_info = ptr;
err = br_fdb_external_learn_del(br, p, fdb_info->addr,
fdb_info->vid);
@@ -159,8 +159,8 @@ out:
return err;
}
-static struct notifier_block br_netdev_switch_notifier = {
- .notifier_call = br_netdev_switch_event,
+static struct notifier_block br_switchdev_notifier = {
+ .notifier_call = br_switchdev_event,
};
static void __net_exit br_net_exit(struct net *net)
@@ -214,7 +214,7 @@ static int __init br_init(void)
if (err)
goto err_out3;
- err = register_netdev_switch_notifier(&br_netdev_switch_notifier);
+ err = register_switchdev_notifier(&br_switchdev_notifier);
if (err)
goto err_out4;
@@ -235,7 +235,7 @@ static int __init br_init(void)
return 0;
err_out5:
- unregister_netdev_switch_notifier(&br_netdev_switch_notifier);
+ unregister_switchdev_notifier(&br_switchdev_notifier);
err_out4:
unregister_netdevice_notifier(&br_device_notifier);
err_out3:
@@ -253,7 +253,7 @@ static void __exit br_deinit(void)
{
stp_proto_unregister(&br_stp_proto);
br_netlink_fini();
- unregister_netdev_switch_notifier(&br_netdev_switch_notifier);
+ unregister_switchdev_notifier(&br_switchdev_notifier);
unregister_netdevice_notifier(&br_device_notifier);
brioctl_set(NULL);
unregister_pernet_subsys(&br_net_ops);
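
The netdev_switch_* to switchdev_* rename above is mechanical; the notifier contract itself is unchanged. A minimal consumer mirroring the bridge code, with illustrative handler and block names:

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/switchdev.h>

static int example_switchdev_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_fdb_info *fdb_info;

	switch (event) {
	case SWITCHDEV_FDB_ADD:
		fdb_info = ptr;
		pr_info("%s: fdb add %pM vid %u\n", dev->name,
			fdb_info->addr, fdb_info->vid);
		break;
	case SWITCHDEV_FDB_DEL:
		fdb_info = ptr;
		pr_info("%s: fdb del %pM vid %u\n", dev->name,
			fdb_info->addr, fdb_info->vid);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_switchdev_nb = {
	.notifier_call = example_switchdev_event,
};

/* register_switchdev_notifier(&example_switchdev_nb) at module init,
 * unregister_switchdev_notifier(&example_switchdev_nb) at exit
 */
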
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 659fb96672e4..13949a71591d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -736,6 +736,12 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
struct net_bridge_fdb_entry *fdb;
bool modified = false;
+ /* If the port cannot learn, allow only local and static entries */
+ if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
+ !(source->state == BR_STATE_LEARNING ||
+ source->state == BR_STATE_FORWARDING))
+ return -EPERM;
+
fdb = fdb_find(head, addr, vid);
if (fdb == NULL) {
if (!(flags & NLM_F_CREATE))
@@ -867,13 +873,15 @@ out:
return err;
}
-static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vlan)
+static int fdb_delete_by_addr_and_port(struct net_bridge_port *p,
+ const u8 *addr, u16 vlan)
{
+ struct net_bridge *br = p->br;
struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
struct net_bridge_fdb_entry *fdb;
fdb = fdb_find(head, addr, vlan);
- if (!fdb)
+ if (!fdb || fdb->dst != p)
return -ENOENT;
fdb_delete(br, fdb);
@@ -886,7 +894,7 @@ static int __br_fdb_delete(struct net_bridge_port *p,
int err;
spin_lock_bh(&p->br->hash_lock);
- err = fdb_delete_by_addr(p->br, addr, vid);
+ err = fdb_delete_by_addr_and_port(p, addr, vid);
spin_unlock_bh(&p->br->hash_lock);
return err;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index ff667e18b2d6..2e246a1a9b43 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -975,9 +975,6 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
int err = 0;
__be32 group;
- if (!pskb_may_pull(skb, sizeof(*ih)))
- return -EINVAL;
-
ih = igmpv3_report_hdr(skb);
num = ntohs(ih->ngrec);
len = sizeof(*ih);
@@ -1247,25 +1244,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
max_delay = 10 * HZ;
group = 0;
}
- } else {
- if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
- err = -EINVAL;
- goto out;
- }
-
+ } else if (skb->len >= sizeof(*ih3)) {
ih3 = igmpv3_query_hdr(skb);
if (ih3->nsrcs)
goto out;
max_delay = ih3->code ?
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
- }
-
- /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
- * all-systems destination addresses (224.0.0.1) for general queries
- */
- if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
- err = -EINVAL;
+ } else {
goto out;
}
@@ -1328,12 +1314,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
- if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
- err = -EINVAL;
- goto out;
- }
-
if (skb->len == sizeof(*mld)) {
if (!pskb_may_pull(skb, sizeof(*mld))) {
err = -EINVAL;
@@ -1357,14 +1337,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
is_general_query = group && ipv6_addr_any(group);
- /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
- * all-nodes destination address (ff02::1) for general queries
- */
- if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
- err = -EINVAL;
- goto out;
- }
-
if (is_general_query) {
saddr.proto = htons(ETH_P_IPV6);
saddr.u.ip6 = ip6h->saddr;
@@ -1556,74 +1528,22 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
- struct sk_buff *skb2 = skb;
- const struct iphdr *iph;
+ struct sk_buff *skb_trimmed = NULL;
struct igmphdr *ih;
- unsigned int len;
- unsigned int offset;
int err;
- /* We treat OOM as packet loss for now. */
- if (!pskb_may_pull(skb, sizeof(*iph)))
- return -EINVAL;
-
- iph = ip_hdr(skb);
-
- if (iph->ihl < 5 || iph->version != 4)
- return -EINVAL;
-
- if (!pskb_may_pull(skb, ip_hdrlen(skb)))
- return -EINVAL;
-
- iph = ip_hdr(skb);
+ err = ip_mc_check_igmp(skb, &skb_trimmed);
- if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
- return -EINVAL;
-
- if (iph->protocol != IPPROTO_IGMP) {
- if (!ipv4_is_local_multicast(iph->daddr))
+ if (err == -ENOMSG) {
+ if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr))
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
return 0;
+ } else if (err < 0) {
+ return err;
}
- len = ntohs(iph->tot_len);
- if (skb->len < len || len < ip_hdrlen(skb))
- return -EINVAL;
-
- if (skb->len > len) {
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (!skb2)
- return -ENOMEM;
-
- err = pskb_trim_rcsum(skb2, len);
- if (err)
- goto err_out;
- }
-
- len -= ip_hdrlen(skb2);
- offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
- __skb_pull(skb2, offset);
- skb_reset_transport_header(skb2);
-
- err = -EINVAL;
- if (!pskb_may_pull(skb2, sizeof(*ih)))
- goto out;
-
- switch (skb2->ip_summed) {
- case CHECKSUM_COMPLETE:
- if (!csum_fold(skb2->csum))
- break;
- /* fall through */
- case CHECKSUM_NONE:
- skb2->csum = 0;
- if (skb_checksum_complete(skb2))
- goto out;
- }
-
- err = 0;
-
BR_INPUT_SKB_CB(skb)->igmp = 1;
- ih = igmp_hdr(skb2);
+ ih = igmp_hdr(skb);
switch (ih->type) {
case IGMP_HOST_MEMBERSHIP_REPORT:
@@ -1632,21 +1552,19 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
err = br_ip4_multicast_add_group(br, port, ih->group, vid);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
- err = br_ip4_multicast_igmp3_report(br, port, skb2, vid);
+ err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
break;
case IGMP_HOST_MEMBERSHIP_QUERY:
- err = br_ip4_multicast_query(br, port, skb2, vid);
+ err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
break;
case IGMP_HOST_LEAVE_MESSAGE:
br_ip4_multicast_leave_group(br, port, ih->group, vid);
break;
}
-out:
- __skb_push(skb2, offset);
-err_out:
- if (skb2 != skb)
- kfree_skb(skb2);
+ if (skb_trimmed)
+ kfree_skb(skb_trimmed);
+
return err;
}
@@ -1656,138 +1574,42 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
- struct sk_buff *skb2;
- const struct ipv6hdr *ip6h;
- u8 icmp6_type;
- u8 nexthdr;
- __be16 frag_off;
- unsigned int len;
- int offset;
+ struct sk_buff *skb_trimmed = NULL;
+ struct mld_msg *mld;
int err;
- if (!pskb_may_pull(skb, sizeof(*ip6h)))
- return -EINVAL;
-
- ip6h = ipv6_hdr(skb);
-
- /*
- * We're interested in MLD messages only.
- * - Version is 6
- * - MLD has always Router Alert hop-by-hop option
- * - But we do not support jumbrograms.
- */
- if (ip6h->version != 6)
- return 0;
-
- /* Prevent flooding this packet if there is no listener present */
- if (!ipv6_addr_is_ll_all_nodes(&ip6h->daddr))
- BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
-
- if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
- ip6h->payload_len == 0)
- return 0;
-
- len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
- if (skb->len < len)
- return -EINVAL;
-
- nexthdr = ip6h->nexthdr;
- offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);
+ err = ipv6_mc_check_mld(skb, &skb_trimmed);
- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
+ if (err == -ENOMSG) {
+ if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
+ BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
return 0;
-
- /* Okay, we found ICMPv6 header */
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (!skb2)
- return -ENOMEM;
-
- err = -EINVAL;
- if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
- goto out;
-
- len -= offset - skb_network_offset(skb2);
-
- __skb_pull(skb2, offset);
- skb_reset_transport_header(skb2);
- skb_postpull_rcsum(skb2, skb_network_header(skb2),
- skb_network_header_len(skb2));
-
- icmp6_type = icmp6_hdr(skb2)->icmp6_type;
-
- switch (icmp6_type) {
- case ICMPV6_MGM_QUERY:
- case ICMPV6_MGM_REPORT:
- case ICMPV6_MGM_REDUCTION:
- case ICMPV6_MLD2_REPORT:
- break;
- default:
- err = 0;
- goto out;
- }
-
- /* Okay, we found MLD message. Check further. */
- if (skb2->len > len) {
- err = pskb_trim_rcsum(skb2, len);
- if (err)
- goto out;
- err = -EINVAL;
- }
-
- ip6h = ipv6_hdr(skb2);
-
- switch (skb2->ip_summed) {
- case CHECKSUM_COMPLETE:
- if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
- IPPROTO_ICMPV6, skb2->csum))
- break;
- /*FALLTHROUGH*/
- case CHECKSUM_NONE:
- skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
- &ip6h->daddr,
- skb2->len,
- IPPROTO_ICMPV6, 0));
- if (__skb_checksum_complete(skb2))
- goto out;
+ } else if (err < 0) {
+ return err;
}
- err = 0;
-
BR_INPUT_SKB_CB(skb)->igmp = 1;
+ mld = (struct mld_msg *)skb_transport_header(skb);
- switch (icmp6_type) {
+ switch (mld->mld_type) {
case ICMPV6_MGM_REPORT:
- {
- struct mld_msg *mld;
- if (!pskb_may_pull(skb2, sizeof(*mld))) {
- err = -EINVAL;
- goto out;
- }
- mld = (struct mld_msg *)skb_transport_header(skb2);
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
break;
- }
case ICMPV6_MLD2_REPORT:
- err = br_ip6_multicast_mld2_report(br, port, skb2, vid);
+ err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
break;
case ICMPV6_MGM_QUERY:
- err = br_ip6_multicast_query(br, port, skb2, vid);
+ err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
break;
case ICMPV6_MGM_REDUCTION:
- {
- struct mld_msg *mld;
- if (!pskb_may_pull(skb2, sizeof(*mld))) {
- err = -EINVAL;
- goto out;
- }
- mld = (struct mld_msg *)skb_transport_header(skb2);
br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
- }
+ break;
}
-out:
- kfree_skb(skb2);
+ if (skb_trimmed)
+ kfree_skb(skb_trimmed);
+
return err;
}
#endif
@@ -1949,11 +1771,9 @@ out:
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
- int err = -ENOENT;
+ int err = -EINVAL;
spin_lock_bh(&br->multicast_lock);
- if (!netif_running(br->dev))
- goto unlock;
switch (val) {
case 0:
@@ -1964,13 +1784,8 @@ int br_multicast_set_router(struct net_bridge *br, unsigned long val)
br->multicast_router = val;
err = 0;
break;
-
- default:
- err = -EINVAL;
- break;
}
-unlock:
spin_unlock_bh(&br->multicast_lock);
return err;
@@ -1979,11 +1794,9 @@ unlock:
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
struct net_bridge *br = p->br;
- int err = -ENOENT;
+ int err = -EINVAL;
spin_lock(&br->multicast_lock);
- if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
- goto unlock;
switch (val) {
case 0:
@@ -2005,13 +1818,8 @@ int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
br_multicast_add_router(br, p);
break;
-
- default:
- err = -EINVAL;
- break;
}
-unlock:
spin_unlock(&br->multicast_lock);
return err;
@@ -2116,15 +1924,11 @@ unlock:
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
- int err = -ENOENT;
+ int err = -EINVAL;
u32 old;
struct net_bridge_mdb_htable *mdb;
spin_lock_bh(&br->multicast_lock);
- if (!netif_running(br->dev))
- goto unlock;
-
- err = -EINVAL;
if (!is_power_of_2(val))
goto unlock;
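
Both receive paths now rely on one helper contract: a return of 0 means a valid IGMP/MLD packet (headers pulled, checksums verified and, if needed, a trimmed clone handed back), -ENOMSG means a well-formed packet that simply is not IGMP/MLD, and any other negative value marks a malformed packet. A sketch of a caller written against that contract, assuming the ip_mc_check_igmp() helper introduced alongside this change:

#include <linux/skbuff.h>
#include <linux/igmp.h>

static int example_igmp_rcv(struct sk_buff *skb)
{
	struct sk_buff *skb_trimmed = NULL;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);
	if (err == -ENOMSG)
		return 0;	/* valid IP, just not IGMP: not an error */
	else if (err < 0)
		return err;	/* malformed: propagate */

	/* valid IGMP: parse igmp_hdr(skb) here; skb_trimmed, when set,
	 * is a clone cut down to the IP total length for the parsers
	 */
	if (skb_trimmed)
		kfree_skb(skb_trimmed);

	return 0;
}
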
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 60ddfbeb47f5..46660a28feef 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -125,6 +125,14 @@ static struct nf_bridge_info *nf_bridge_info_get(const struct sk_buff *skb)
return skb->nf_bridge;
}
+static void nf_bridge_info_free(struct sk_buff *skb)
+{
+ if (skb->nf_bridge) {
+ nf_bridge_put(skb->nf_bridge);
+ skb->nf_bridge = NULL;
+ }
+}
+
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
struct net_bridge_port *port;
@@ -832,17 +840,39 @@ static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
__skb_push(skb, data->encap_size);
+ nf_bridge_info_free(skb);
return br_dev_queue_push_xmit(sk, skb);
}
+static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *))
+{
+ unsigned int mtu = ip_skb_dst_mtu(skb);
+ struct iphdr *iph = ip_hdr(skb);
+ struct rtable *rt = skb_rtable(skb);
+ struct net_device *dev = rt->dst.dev;
+
+ if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
+ (IPCB(skb)->frag_max_size &&
+ IPCB(skb)->frag_max_size > mtu))) {
+ IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+
+ return ip_do_fragment(sk, skb, output);
+}
+
static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
{
int ret;
int frag_max_size;
unsigned int mtu_reserved;
- if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP))
+ if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP)) {
+ nf_bridge_info_free(skb);
return br_dev_queue_push_xmit(sk, skb);
+ }
mtu_reserved = nf_bridge_mtu_reduction(skb);
/* This is wrong! We should preserve the original fragment
@@ -866,8 +896,9 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
data->size);
- ret = ip_fragment(sk, skb, br_nf_push_frag_xmit);
+ ret = br_nf_ip_fragment(sk, skb, br_nf_push_frag_xmit);
} else {
+ nf_bridge_info_free(skb);
ret = br_dev_queue_push_xmit(sk, skb);
}
@@ -876,7 +907,8 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
#else
static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
{
- return br_dev_queue_push_xmit(sk, skb);
+ nf_bridge_info_free(skb);
+ return br_dev_queue_push_xmit(sk, skb);
}
#endif
@@ -964,6 +996,8 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
nf_bridge->neigh_header,
ETH_HLEN - ETH_ALEN);
skb->dev = nf_bridge->physindev;
+
+ nf_bridge->physoutdev = NULL;
br_handle_frame_finish(NULL, skb);
}
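
The refusal condition in br_nf_ip_fragment() above is the interesting part: fragmentation is declined with -EMSGSIZE either when the packet carries DF and nothing set skb->ignore_df, or when conntrack recorded a frag_max_size exceeding the route MTU. Restated as a standalone predicate, with simplified stand-ins for the skb and IPCB state:

#include <stdbool.h>
#include <stdint.h>

static bool must_refuse_fragmentation(bool df_set, bool ignore_df,
				      uint16_t frag_max_size,
				      unsigned int mtu)
{
	if (df_set && !ignore_df)
		return true;	/* DF forbids fragmenting */
	if (frag_max_size && frag_max_size > mtu)
		return true;	/* original fragments exceed the MTU */
	return false;
}
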
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4b5c236998ff..6b67ed3831de 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -586,7 +586,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
struct nlattr *afspec;
struct net_bridge_port *p;
struct nlattr *tb[IFLA_BRPORT_MAX + 1];
- int err = 0, ret_offload = 0;
+ int err = 0;
protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
@@ -628,16 +628,6 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
afspec, RTM_SETLINK);
}
- if (p && !(flags & BRIDGE_FLAGS_SELF)) {
- /* set bridge attributes in hardware if supported
- */
- ret_offload = netdev_switch_port_bridge_setlink(dev, nlh,
- flags);
- if (ret_offload && ret_offload != -EOPNOTSUPP)
- br_warn(p->br, "error setting attrs on port %u(%s)\n",
- (unsigned int)p->port_no, p->dev->name);
- }
-
if (err == 0)
br_ifinfo_notify(RTM_NEWLINK, p);
out:
@@ -649,7 +639,7 @@ int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
struct nlattr *afspec;
struct net_bridge_port *p;
- int err = 0, ret_offload = 0;
+ int err = 0;
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!afspec)
@@ -668,16 +658,6 @@ int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
*/
br_ifinfo_notify(RTM_NEWLINK, p);
- if (p && !(flags & BRIDGE_FLAGS_SELF)) {
- /* del bridge attributes in hardware
- */
- ret_offload = netdev_switch_port_bridge_dellink(dev, nlh,
- flags);
- if (ret_offload && ret_offload != -EOPNOTSUPP)
- br_warn(p->br, "error deleting attrs on port %u (%s)\n",
- (unsigned int)p->port_no, p->dev->name);
- }
-
return err;
}
static int br_validate(struct nlattr *tb[], struct nlattr *data[])
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 3362c29400f1..1f36fa70639b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -33,8 +33,8 @@
/* Control of forwarding link local multicast */
#define BR_GROUPFWD_DEFAULT 0
-/* Don't allow forwarding control protocols like STP and LLDP */
-#define BR_GROUPFWD_RESTRICTED 0x4007u
+/* Don't allow forwarding of control protocols like STP, MAC PAUSE and LACP */
+#define BR_GROUPFWD_RESTRICTED 0x0007u
/* The Nearest Customer Bridge Group Address, 01-80-C2-00-00-[00,0B,0C,0D,0F] */
#define BR_GROUPFWD_8021AD 0xB801u
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index fb3ebe615513..45f1ff113af9 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -39,10 +39,14 @@ void br_log_state(const struct net_bridge_port *p)
void br_set_state(struct net_bridge_port *p, unsigned int state)
{
+ struct switchdev_attr attr = {
+ .id = SWITCHDEV_ATTR_PORT_STP_STATE,
+ .u.stp_state = state,
+ };
int err;
p->state = state;
- err = netdev_switch_port_stp_update(p->dev, state);
+ err = switchdev_port_attr_set(p->dev, &attr);
if (err && err != -EOPNOTSUPP)
br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
(unsigned int) p->port_no, p->dev->name);
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 071d87214dde..0c40570069ba 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -164,8 +164,10 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
!(info->bitmask & EBT_STP_MASK))
return -EINVAL;
/* Make sure the match only receives stp frames */
- if (!ether_addr_equal(e->destmac, bridge_ula) ||
- !ether_addr_equal(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
+ if (!par->nft_compat &&
+ (!ether_addr_equal(e->destmac, bridge_ula) ||
+ !ether_addr_equal(e->destmsk, msk) ||
+ !(e->bitmask & EBT_DESTMAC)))
return -EINVAL;
return 0;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 91180a7fc943..5149d9e71114 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -139,7 +139,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
ethproto = h->h_proto;
if (e->bitmask & EBT_802_3) {
- if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO))
+ if (FWINV2(eth_proto_is_802_3(ethproto), EBT_IPROTO))
return 1;
} else if (!(e->bitmask & EBT_NOPROTO) &&
FWINV2(e->ethproto != ethproto, EBT_IPROTO))
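
The ebt_basic_match() change swaps an open-coded ntohs() comparison for eth_proto_is_802_3(); the test is still "is this a true EtherType rather than an 802.3 length field". An equivalent plain-C restatement (the in-kernel helper is a byte-order-optimized variant of the same check):

#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_802_3_MIN 0x0600	/* lowest value that is an EtherType */

/* h_proto values below 0x0600 are 802.3 frame lengths; values at or
 * above are EtherTypes
 */
static bool eth_proto_is_802_3_sketch(uint16_t proto_be16)
{
	return ntohs(proto_be16) >= ETH_P_802_3_MIN;
}
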
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 112ad784838a..3cc71b9f5517 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -1055,7 +1055,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
* is really not used at all in the net/core or socket.c but the
* initialization makes sure that sock->state is not uninitialized.
*/
- sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
+ sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot, kern);
if (!sk)
return -ENOMEM;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 32d710eaf1fc..d4d404bdfc9a 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -179,7 +179,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
sock->ops = cp->ops;
- sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
+ sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
if (!sk) {
err = -ENOMEM;
goto errout;
diff --git a/net/can/gw.c b/net/can/gw.c
index a6f448e18ea8..455168718c2e 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -110,6 +110,7 @@ struct cf_mod {
void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
} csumfunc;
+ u32 uid;
};
@@ -548,6 +549,11 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
goto cancel;
}
+ if (gwj->mod.uid) {
+ if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
+ goto cancel;
+ }
+
if (gwj->mod.csumfunc.crc8) {
if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
&gwj->mod.csum.crc8) < 0)
@@ -619,6 +625,7 @@ static const struct nla_policy cgw_policy[CGW_MAX+1] = {
[CGW_DST_IF] = { .type = NLA_U32 },
[CGW_FILTER] = { .len = sizeof(struct can_filter) },
[CGW_LIM_HOPS] = { .type = NLA_U8 },
+ [CGW_MOD_UID] = { .type = NLA_U32 },
};
/* check for common and gwtype specific attributes */
@@ -761,6 +768,10 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
else
mod->csumfunc.xor = cgw_csum_xor_neg;
}
+
+ if (tb[CGW_MOD_UID]) {
+ nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32));
+ }
}
if (gwtype == CGW_TYPE_CAN_CAN) {
@@ -802,6 +813,8 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct rtcanmsg *r;
struct cgw_job *gwj;
+ struct cf_mod mod;
+ struct can_can_gw ccgw;
u8 limhops = 0;
int err = 0;
@@ -819,6 +832,36 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
if (r->gwtype != CGW_TYPE_CAN_CAN)
return -EINVAL;
+ err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
+ if (err < 0)
+ return err;
+
+ if (mod.uid) {
+
+ ASSERT_RTNL();
+
+ /* check for updating an existing job with identical uid */
+ hlist_for_each_entry(gwj, &cgw_list, list) {
+
+ if (gwj->mod.uid != mod.uid)
+ continue;
+
+ /* interfaces & filters must be identical */
+ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
+ return -EINVAL;
+
+ /* update modifications with disabled softirq & quit */
+ local_bh_disable();
+ memcpy(&gwj->mod, &mod, sizeof(mod));
+ local_bh_enable();
+ return 0;
+ }
+ }
+
+ /* ifindex == 0 is not allowed for job creation */
+ if (!ccgw.src_idx || !ccgw.dst_idx)
+ return -ENODEV;
+
gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
if (!gwj)
return -ENOMEM;
@@ -828,18 +871,14 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
gwj->deleted_frames = 0;
gwj->flags = r->flags;
gwj->gwtype = r->gwtype;
+ gwj->limit_hops = limhops;
- err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw,
- &limhops);
- if (err < 0)
- goto out;
+ /* insert already parsed information */
+ memcpy(&gwj->mod, &mod, sizeof(mod));
+ memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));
err = -ENODEV;
- /* ifindex == 0 is not allowed for job creation */
- if (!gwj->ccgw.src_idx || !gwj->ccgw.dst_idx)
- goto out;
-
gwj->src.dev = __dev_get_by_index(&init_net, gwj->ccgw.src_idx);
if (!gwj->src.dev)
@@ -856,8 +895,6 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
if (gwj->dst.dev->type != ARPHRD_CAN)
goto out;
- gwj->limit_hops = limhops;
-
ASSERT_RTNL();
err = cgw_register_filter(gwj);
@@ -931,8 +968,15 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
if (gwj->limit_hops != limhops)
continue;
- if (memcmp(&gwj->mod, &mod, sizeof(mod)))
- continue;
+ /* we have a match when uid is enabled and identical */
+ if (gwj->mod.uid || mod.uid) {
+ if (gwj->mod.uid != mod.uid)
+ continue;
+ } else {
+ /* no uid => check for identical modifications */
+ if (memcmp(&gwj->mod, &mod, sizeof(mod)))
+ continue;
+ }
/* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
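
The CGW_MOD_UID attribute changes both halves of the API: on create, a request whose uid matches an existing job (with identical interfaces and filters) becomes an in-place update of that job's modifications; on remove, a nonzero uid on either side makes the uid the sole modification key, and only uid-less jobs fall back to a byte-wise comparison. The remove-side rule as a standalone predicate, with an abbreviated stand-in for struct cf_mod:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct mod_sketch {
	uint32_t uid;
	unsigned char payload[64];	/* stand-in for the modification set */
};

/* mirrors the matching rule in cgw_remove_job() above */
static bool mods_match(const struct mod_sketch *a, const struct mod_sketch *b)
{
	if (a->uid || b->uid)
		return a->uid == b->uid;	/* uid wins when present */

	return !memcmp(a, b, sizeof(*a));	/* else compare byte-wise */
}
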
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 967080a9f043..073262fea6dd 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -480,8 +480,8 @@ static int ceph_tcp_connect(struct ceph_connection *con)
int ret;
BUG_ON(con->sock);
- ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
- IPPROTO_TCP, &sock);
+ ret = sock_create_kern(&init_net, con->peer_addr.in_addr.ss_family,
+ SOCK_STREAM, IPPROTO_TCP, &sock);
if (ret)
return ret;
sock->sk->sk_allocation = GFP_NOFS;
diff --git a/net/core/dev.c b/net/core/dev.c
index aa82f9ab6a36..6778a9999d52 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -135,6 +135,7 @@
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
+#include <linux/netfilter_ingress.h>
#include "net-sysfs.h"
@@ -468,10 +469,14 @@ EXPORT_SYMBOL(dev_remove_pack);
*/
void dev_add_offload(struct packet_offload *po)
{
- struct list_head *head = &offload_base;
+ struct packet_offload *elem;
spin_lock(&offload_lock);
- list_add_rcu(&po->list, head);
+ list_for_each_entry(elem, &offload_base, list) {
+ if (po->priority < elem->priority)
+ break;
+ }
+ list_add_rcu(&po->list, elem->list.prev);
spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
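
dev_add_offload() now keeps offload_base ordered by ascending priority instead of always pushing to the head: the walk breaks at the first element with a higher priority and inserts in front of it, and because list_for_each_entry() leaves elem->list.prev pointing at the tail when the loop runs off the end, the empty-list and append cases fall out for free. The same idiom on a plain (non-RCU) list:

#include <linux/list.h>

struct prio_item {
	struct list_head list;
	int priority;
};

static LIST_HEAD(prio_list);

/* insert 'item' keeping prio_list sorted by ascending priority */
static void prio_insert(struct prio_item *item)
{
	struct prio_item *elem;

	list_for_each_entry(elem, &prio_list, list) {
		if (item->priority < elem->priority)
			break;
	}
	/* whether we broke out or fell off the end, elem->list.prev
	 * is the node to insert after
	 */
	list_add(&item->list, elem->list.prev);
}
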
@@ -1630,7 +1635,7 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
}
EXPORT_SYMBOL(call_netdevice_notifiers);
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;
void net_inc_ingress_queue(void)
@@ -2343,6 +2348,34 @@ void netif_device_attach(struct net_device *dev)
}
EXPORT_SYMBOL(netif_device_attach);
+/*
+ * Returns a Tx hash based on the given packet descriptor and the number
+ * of Tx queues to be used as a distribution range.
+ */
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
+ unsigned int num_tx_queues)
+{
+ u32 hash;
+ u16 qoffset = 0;
+ u16 qcount = num_tx_queues;
+
+ if (skb_rx_queue_recorded(skb)) {
+ hash = skb_get_rx_queue(skb);
+ while (unlikely(hash >= num_tx_queues))
+ hash -= num_tx_queues;
+ return hash;
+ }
+
+ if (dev->num_tc) {
+ u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+ qoffset = dev->tc_to_txq[tc].offset;
+ qcount = dev->tc_to_txq[tc].count;
+ }
+
+ return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
+}
+EXPORT_SYMBOL(__skb_tx_hash);
+
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
static const netdev_features_t null_features = 0;
@@ -2901,6 +2934,84 @@ int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(dev_loopback_xmit);
+static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+ struct xps_dev_maps *dev_maps;
+ struct xps_map *map;
+ int queue_index = -1;
+
+ rcu_read_lock();
+ dev_maps = rcu_dereference(dev->xps_maps);
+ if (dev_maps) {
+ map = rcu_dereference(
+ dev_maps->cpu_map[skb->sender_cpu - 1]);
+ if (map) {
+ if (map->len == 1)
+ queue_index = map->queues[0];
+ else
+ queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
+ map->len)];
+ if (unlikely(queue_index >= dev->real_num_tx_queues))
+ queue_index = -1;
+ }
+ }
+ rcu_read_unlock();
+
+ return queue_index;
+#else
+ return -1;
+#endif
+}
+
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ int queue_index = sk_tx_queue_get(sk);
+
+ if (queue_index < 0 || skb->ooo_okay ||
+ queue_index >= dev->real_num_tx_queues) {
+ int new_index = get_xps_queue(dev, skb);
+ if (new_index < 0)
+ new_index = skb_tx_hash(dev, skb);
+
+ if (queue_index != new_index && sk &&
+ rcu_access_pointer(sk->sk_dst_cache))
+ sk_tx_queue_set(sk, new_index);
+
+ queue_index = new_index;
+ }
+
+ return queue_index;
+}
+
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+ struct sk_buff *skb,
+ void *accel_priv)
+{
+ int queue_index = 0;
+
+#ifdef CONFIG_XPS
+ if (skb->sender_cpu == 0)
+ skb->sender_cpu = raw_smp_processor_id() + 1;
+#endif
+
+ if (dev->real_num_tx_queues != 1) {
+ const struct net_device_ops *ops = dev->netdev_ops;
+ if (ops->ndo_select_queue)
+ queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+ __netdev_pick_tx);
+ else
+ queue_index = __netdev_pick_tx(dev, skb);
+
+ if (!accel_priv)
+ queue_index = netdev_cap_txqueue(dev, queue_index);
+ }
+
+ skb_set_queue_mapping(skb, queue_index);
+ return netdev_get_tx_queue(dev, queue_index);
+}
+
/**
* __dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
@@ -3513,66 +3624,47 @@ int (*br_fdb_test_addr_hook)(struct net_device *dev,
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
-#ifdef CONFIG_NET_CLS_ACT
-/* TODO: Maybe we should just force sch_ingress to be compiled in
- * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
- * a compare and 2 stores extra right now if we dont have it on
- * but have CONFIG_NET_CLS_ACT
- * NOTE: This doesn't stop any functionality; if you dont have
- * the ingress scheduler, you just can't add policies on ingress.
- *
- */
-static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
-{
- struct net_device *dev = skb->dev;
- u32 ttl = G_TC_RTTL(skb->tc_verd);
- int result = TC_ACT_OK;
- struct Qdisc *q;
-
- if (unlikely(MAX_RED_LOOP < ttl++)) {
- net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
- skb->skb_iif, dev->ifindex);
- return TC_ACT_SHOT;
- }
-
- skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
- skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
-
- q = rcu_dereference(rxq->qdisc);
- if (q != &noop_qdisc) {
- spin_lock(qdisc_lock(q));
- if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
- result = qdisc_enqueue_root(skb, q);
- spin_unlock(qdisc_lock(q));
- }
-
- return result;
-}
-
static inline struct sk_buff *handle_ing(struct sk_buff *skb,
struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{
- struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
+#ifdef CONFIG_NET_CLS_ACT
+ struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
+ struct tcf_result cl_res;
- if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
+ /* If there's at least one ingress present somewhere (so
+ * we get here via enabled static key), remaining devices
+ * that are not configured with an ingress qdisc will bail
+ * out here.
+ */
+ if (!cl)
return skb;
-
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = NULL;
}
- switch (ing_filter(skb, rxq)) {
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
+ qdisc_bstats_update_cpu(cl->q, skb);
+
+ switch (tc_classify(skb, cl, &cl_res)) {
+ case TC_ACT_OK:
+ case TC_ACT_RECLASSIFY:
+ skb->tc_index = TC_H_MIN(cl_res.classid);
+ break;
case TC_ACT_SHOT:
+ qdisc_qstats_drop_cpu(cl->q);
case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
kfree_skb(skb);
return NULL;
+ default:
+ break;
}
-
+#endif /* CONFIG_NET_CLS_ACT */
return skb;
}
-#endif
/**
* netdev_rx_handler_register - register receive handler
@@ -3645,6 +3737,22 @@ static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
}
}
+static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
+ int *ret, struct net_device *orig_dev)
+{
+#ifdef CONFIG_NETFILTER_INGRESS
+ if (nf_hook_ingress_active(skb)) {
+ if (*pt_prev) {
+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
+ *pt_prev = NULL;
+ }
+
+ return nf_hook_ingress(skb);
+ }
+#endif /* CONFIG_NETFILTER_INGRESS */
+ return 0;
+}
+
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
struct packet_type *ptype, *pt_prev;
@@ -3704,13 +3812,17 @@ another_round:
}
skip_taps:
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_INGRESS
if (static_key_false(&ingress_needed)) {
skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
if (!skb)
goto unlock;
- }
+ if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
+ goto unlock;
+ }
+#endif
+#ifdef CONFIG_NET_CLS_ACT
skb->tc_verd = 0;
ncls:
#endif
@@ -6313,6 +6425,17 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
return 0;
}
+void netif_tx_stop_all_queues(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ netif_tx_stop_queue(txq);
+ }
+}
+EXPORT_SYMBOL(netif_tx_stop_all_queues);
+
/**
* register_netdevice - register a network device
* @dev: device to register
@@ -6862,6 +6985,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->group = INIT_NETDEV_GROUP;
if (!dev->ethtool_ops)
dev->ethtool_ops = &default_ethtool_ops;
+
+ nf_hook_ingress_init(dev);
+
return dev;
free_all:
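
netdev_pick_tx(), moved into dev.c above, resolves the transmit queue in stages: the device's ndo_select_queue() if it has one, otherwise a cached per-socket queue when still valid, then an XPS lookup keyed by the recorded sender CPU, and finally the generic __skb_tx_hash() fallback, which reuses a recorded rx queue when present and otherwise scales skb_get_hash() into the per-traffic-class queue range. The fallback arithmetic, shown standalone:

#include <stdint.h>

/* reciprocal_scale(): map a 32-bit hash uniformly onto [0, n) without
 * a division, as the kernel helper does
 */
static uint32_t reciprocal_scale_sketch(uint32_t hash, uint32_t n)
{
	return (uint32_t)(((uint64_t)hash * n) >> 32);
}

/* distribution step of __skb_tx_hash(); qoffset/qcount would come from
 * dev->tc_to_txq[] when the device has traffic classes
 */
static uint16_t tx_hash_sketch(uint32_t flow_hash,
			       uint16_t qoffset, uint16_t qcount)
{
	return (uint16_t)reciprocal_scale_sketch(flow_hash, qcount) + qoffset;
}
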
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 1d00b8922902..b495ab1797fa 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -98,7 +98,6 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_RXALL_BIT] = "rx-all",
[NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
[NETIF_F_BUSY_POLL_BIT] = "busy-poll",
- [NETIF_F_HW_SWITCH_OFFLOAD_BIT] = "hw-switch-offload",
};
static const char
@@ -107,6 +106,13 @@ rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = {
[ETH_RSS_HASH_XOR_BIT] = "xor",
};
+static const char
+tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
+ [ETHTOOL_ID_UNSPEC] = "Unspec",
+ [ETHTOOL_RX_COPYBREAK] = "rx-copybreak",
+ [ETHTOOL_TX_COPYBREAK] = "tx-copybreak",
+};
+
static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
{
struct ethtool_gfeatures cmd = {
@@ -195,6 +201,9 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset)
if (sset == ETH_SS_RSS_HASH_FUNCS)
return ARRAY_SIZE(rss_hash_func_strings);
+ if (sset == ETH_SS_TUNABLES)
+ return ARRAY_SIZE(tunable_strings);
+
if (ops->get_sset_count && ops->get_strings)
return ops->get_sset_count(dev, sset);
else
@@ -212,6 +221,8 @@ static void __ethtool_get_strings(struct net_device *dev,
else if (stringset == ETH_SS_RSS_HASH_FUNCS)
memcpy(data, rss_hash_func_strings,
sizeof(rss_hash_func_strings));
+ else if (stringset == ETH_SS_TUNABLES)
+ memcpy(data, tunable_strings, sizeof(tunable_strings));
else
/* ops->get_strings is valid because checked earlier */
ops->get_strings(dev, stringset, data);
diff --git a/net/core/filter.c b/net/core/filter.c
index bf831a85c315..d271c06bf01f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -36,6 +36,7 @@
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
+#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
@@ -45,6 +46,7 @@
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
+#include <net/sch_generic.h>
/**
* sk_filter - run a packet through a socket filter
@@ -355,8 +357,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
* for socket filters: ctx == 'struct sk_buff *', for seccomp:
* ctx == 'struct seccomp_data *'.
*/
-int bpf_convert_filter(struct sock_filter *prog, int len,
- struct bpf_insn *new_prog, int *new_len)
+static int bpf_convert_filter(struct sock_filter *prog, int len,
+ struct bpf_insn *new_prog, int *new_len)
{
int new_flen = 0, pass = 0, target, i;
struct bpf_insn *new_insn;
@@ -371,7 +373,8 @@ int bpf_convert_filter(struct sock_filter *prog, int len,
return -EINVAL;
if (new_prog) {
- addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
+ addrs = kcalloc(len, sizeof(*addrs),
+ GFP_KERNEL | __GFP_NOWARN);
if (!addrs)
return -ENOMEM;
}
@@ -751,7 +754,8 @@ static bool chk_code_allowed(u16 code_to_probe)
*
* Returns 0 if the rule set is legal or -EINVAL if not.
*/
-int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
+static int bpf_check_classic(const struct sock_filter *filter,
+ unsigned int flen)
{
bool anc_found;
int pc;
@@ -825,7 +829,6 @@ int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
return -EINVAL;
}
-EXPORT_SYMBOL(bpf_check_classic);
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
const struct sock_fprog *fprog)
@@ -839,7 +842,9 @@ static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
fkprog = fp->orig_prog;
fkprog->len = fprog->len;
- fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
+
+ fkprog->filter = kmemdup(fp->insns, fsize,
+ GFP_KERNEL | __GFP_NOWARN);
if (!fkprog->filter) {
kfree(fp->orig_prog);
return -ENOMEM;
@@ -941,7 +946,7 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
* pass. At this time, the user BPF is stored in fp->insns.
*/
old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!old_prog) {
err = -ENOMEM;
goto out_err;
@@ -988,7 +993,8 @@ out_err:
return ERR_PTR(err);
}
-static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
+static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
+ bpf_aux_classic_check_t trans)
{
int err;
@@ -1001,6 +1007,17 @@ static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
return ERR_PTR(err);
}
+ /* There might be additional checks and transformations
+ * needed on classic filters, f.e. in case of seccomp.
+ */
+ if (trans) {
+ err = trans(fp->insns, fp->len);
+ if (err) {
+ __bpf_prog_release(fp);
+ return ERR_PTR(err);
+ }
+ }
+
/* Probe if we can JIT compile the filter and if so, do
* the compilation of the filter.
*/
@@ -1050,7 +1067,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
/* bpf_prepare_filter() already takes care of freeing
* memory in case something goes wrong.
*/
- fp = bpf_prepare_filter(fp);
+ fp = bpf_prepare_filter(fp, NULL);
if (IS_ERR(fp))
return PTR_ERR(fp);
@@ -1059,6 +1076,53 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
}
EXPORT_SYMBOL_GPL(bpf_prog_create);
+/**
+ * bpf_prog_create_from_user - create an unattached filter from user buffer
+ * @pfp: the unattached filter that is created
+ * @fprog: the filter program
+ * @trans: post-classic verifier transformation handler
+ *
+ * This function effectively does the same as bpf_prog_create(), only
+ * that it builds up its insns buffer from user space provided buffer.
+ * It also allows for passing a bpf_aux_classic_check_t handler.
+ */
+int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+ bpf_aux_classic_check_t trans)
+{
+ unsigned int fsize = bpf_classic_proglen(fprog);
+ struct bpf_prog *fp;
+
+ /* Make sure the new filter is present and correctly sized. */
+ if (fprog->filter == NULL)
+ return -EINVAL;
+
+ fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
+ if (!fp)
+ return -ENOMEM;
+
+ if (copy_from_user(fp->insns, fprog->filter, fsize)) {
+ __bpf_prog_free(fp);
+ return -EFAULT;
+ }
+
+ fp->len = fprog->len;
+ /* Since unattached filters are not copied back to user
+ * space through sk_get_filter(), we do not need to hold
+ * a copy here, and can spare us the work.
+ */
+ fp->orig_prog = NULL;
+
+ /* bpf_prepare_filter() already takes care of freeing
+ * memory in case something goes wrong.
+ */
+ fp = bpf_prepare_filter(fp, trans);
+ if (IS_ERR(fp))
+ return PTR_ERR(fp);
+
+ *pfp = fp;
+ return 0;
+}
+
void bpf_prog_destroy(struct bpf_prog *fp)
{
__bpf_prog_release(fp);
@@ -1135,7 +1199,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
/* bpf_prepare_filter() already takes care of freeing
* memory in case something goes wrong.
*/
- prog = bpf_prepare_filter(prog);
+ prog = bpf_prepare_filter(prog, NULL);
if (IS_ERR(prog))
return PTR_ERR(prog);
@@ -1175,21 +1239,6 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
return 0;
}
-/**
- * bpf_skb_clone_not_writable - is the header of a clone not writable
- * @skb: buffer to check
- * @len: length up to which to write, can be negative
- *
- * Returns true if modifying the header part of the cloned buffer
- * does require the data to be copied. I.e. this version works with
- * negative lengths needed for eBPF case!
- */
-static bool bpf_skb_clone_unwritable(const struct sk_buff *skb, int len)
-{
- return skb_header_cloned(skb) ||
- (int) skb_headroom(skb) + len > skb->hdr_len;
-}
-
#define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1)
static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
@@ -1212,9 +1261,8 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
return -EFAULT;
- offset -= skb->data - skb_mac_header(skb);
if (unlikely(skb_cloned(skb) &&
- bpf_skb_clone_unwritable(skb, offset + len)))
+ !skb_clone_writable(skb, offset + len)))
return -EFAULT;
ptr = skb_header_pointer(skb, offset, len, buf);
@@ -1258,9 +1306,8 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
if (unlikely((u32) offset > 0xffff))
return -EFAULT;
- offset -= skb->data - skb_mac_header(skb);
if (unlikely(skb_cloned(skb) &&
- bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
+ !skb_clone_writable(skb, offset + sizeof(sum))))
return -EFAULT;
ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1306,9 +1353,8 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
if (unlikely((u32) offset > 0xffff))
return -EFAULT;
- offset -= skb->data - skb_mac_header(skb);
if (unlikely(skb_cloned(skb) &&
- bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
+ !skb_clone_writable(skb, offset + sizeof(sum))))
return -EFAULT;
ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1344,6 +1390,40 @@ const struct bpf_func_proto bpf_l4_csum_replace_proto = {
.arg5_type = ARG_ANYTHING,
};
+#define BPF_IS_REDIRECT_INGRESS(flags) ((flags) & 1)
+
+static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
+{
+ struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
+ struct net_device *dev;
+
+ dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
+ if (unlikely(!dev))
+ return -EINVAL;
+
+ if (unlikely(!(dev->flags & IFF_UP)))
+ return -EINVAL;
+
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb2))
+ return -ENOMEM;
+
+ if (BPF_IS_REDIRECT_INGRESS(flags))
+ return dev_forward_skb(dev, skb2);
+
+ skb2->dev = dev;
+ return dev_queue_xmit(skb2);
+}
+
+const struct bpf_func_proto bpf_clone_redirect_proto = {
+ .func = bpf_clone_redirect,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+};
+
static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
@@ -1358,6 +1438,10 @@ sk_filter_func_proto(enum bpf_func_id func_id)
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_get_smp_processor_id:
return &bpf_get_smp_processor_id_proto;
+ case BPF_FUNC_tail_call:
+ return &bpf_tail_call_proto;
+ case BPF_FUNC_ktime_get_ns:
+ return &bpf_ktime_get_ns_proto;
default:
return NULL;
}
@@ -1373,18 +1457,15 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
return &bpf_l3_csum_replace_proto;
case BPF_FUNC_l4_csum_replace:
return &bpf_l4_csum_replace_proto;
+ case BPF_FUNC_clone_redirect:
+ return &bpf_clone_redirect_proto;
default:
return sk_filter_func_proto(func_id);
}
}
-static bool sk_filter_is_valid_access(int off, int size,
- enum bpf_access_type type)
+static bool __is_valid_access(int off, int size, enum bpf_access_type type)
{
- /* only read is allowed */
- if (type != BPF_READ)
- return false;
-
/* check bounds */
if (off < 0 || off >= sizeof(struct __sk_buff))
return false;
@@ -1400,8 +1481,42 @@ static bool sk_filter_is_valid_access(int off, int size,
return true;
}
-static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
- struct bpf_insn *insn_buf)
+static bool sk_filter_is_valid_access(int off, int size,
+ enum bpf_access_type type)
+{
+ if (type == BPF_WRITE) {
+ switch (off) {
+ case offsetof(struct __sk_buff, cb[0]) ...
+ offsetof(struct __sk_buff, cb[4]):
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return __is_valid_access(off, size, type);
+}
+
+static bool tc_cls_act_is_valid_access(int off, int size,
+ enum bpf_access_type type)
+{
+ if (type == BPF_WRITE) {
+ switch (off) {
+ case offsetof(struct __sk_buff, mark):
+ case offsetof(struct __sk_buff, tc_index):
+ case offsetof(struct __sk_buff, cb[0]) ...
+ offsetof(struct __sk_buff, cb[4]):
+ break;
+ default:
+ return false;
+ }
+ }
+ return __is_valid_access(off, size, type);
+}
+
+static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
+ int src_reg, int ctx_off,
+ struct bpf_insn *insn_buf)
{
struct bpf_insn *insn = insn_buf;
@@ -1434,8 +1549,34 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
offsetof(struct sk_buff, priority));
break;
+ case offsetof(struct __sk_buff, ingress_ifindex):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);
+
+ *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ offsetof(struct sk_buff, skb_iif));
+ break;
+
+ case offsetof(struct __sk_buff, ifindex):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
+
+ *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+ dst_reg, src_reg,
+ offsetof(struct sk_buff, dev));
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
+ *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+ offsetof(struct net_device, ifindex));
+ break;
+
case offsetof(struct __sk_buff, mark):
- return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+
+ if (type == BPF_WRITE)
+ *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+ offsetof(struct sk_buff, mark));
+ else
+ *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ offsetof(struct sk_buff, mark));
+ break;
case offsetof(struct __sk_buff, pkt_type):
return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);
@@ -1450,6 +1591,38 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
case offsetof(struct __sk_buff, vlan_tci):
return convert_skb_access(SKF_AD_VLAN_TAG,
dst_reg, src_reg, insn);
+
+ case offsetof(struct __sk_buff, cb[0]) ...
+ offsetof(struct __sk_buff, cb[4]):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
+
+ ctx_off -= offsetof(struct __sk_buff, cb[0]);
+ ctx_off += offsetof(struct sk_buff, cb);
+ ctx_off += offsetof(struct qdisc_skb_cb, data);
+ if (type == BPF_WRITE)
+ *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+ else
+ *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+ break;
+
+ case offsetof(struct __sk_buff, tc_index):
+#ifdef CONFIG_NET_SCHED
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
+
+ if (type == BPF_WRITE)
+ *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg,
+ offsetof(struct sk_buff, tc_index));
+ else
+ *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ offsetof(struct sk_buff, tc_index));
+ break;
+#else
+ if (type == BPF_WRITE)
+ *insn++ = BPF_MOV64_REG(dst_reg, dst_reg);
+ else
+ *insn++ = BPF_MOV64_IMM(dst_reg, 0);
+ break;
+#endif
}
return insn - insn_buf;
@@ -1458,13 +1631,13 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
static const struct bpf_verifier_ops sk_filter_ops = {
.get_func_proto = sk_filter_func_proto,
.is_valid_access = sk_filter_is_valid_access,
- .convert_ctx_access = sk_filter_convert_ctx_access,
+ .convert_ctx_access = bpf_net_convert_ctx_access,
};
static const struct bpf_verifier_ops tc_cls_act_ops = {
.get_func_proto = tc_cls_act_func_proto,
- .is_valid_access = sk_filter_is_valid_access,
- .convert_ctx_access = sk_filter_convert_ctx_access,
+ .is_valid_access = tc_cls_act_is_valid_access,
+ .convert_ctx_access = bpf_net_convert_ctx_access,
};
static struct bpf_prog_type_list sk_filter_type __read_mostly = {
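
Together with the bpf_clone_redirect helper, the write-access changes give tc programs usable scratch state: socket filters may now write cb[0]..cb[4], and cls/act programs may additionally write mark and tc_index, with bpf_net_convert_ctx_access() rewriting those stores into the proper sk_buff and qdisc_skb_cb offsets. A restricted-C sketch of a classifier using the new stores; the section name and the program itself are illustrative:

#include <linux/bpf.h>

/* minimal tc classifier: stash a value in the per-skb cb[] scratch
 * area and tag the packet through the now-writable mark field
 */
__attribute__((section("classifier"), used))
int cls_sketch(struct __sk_buff *skb)
{
	skb->cb[0] = skb->len;	/* cb[0]..cb[4]: 20 bytes of scratch */
	skb->mark = 0x1;	/* writable only for cls/act programs */

	return 0;		/* TC_ACT_OK */
}
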
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2c35c02a931e..476e5dda59e1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
@@ -12,19 +13,60 @@
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
-#include <net/flow_keys.h>
+#include <linux/stddef.h>
+#include <linux/if_ether.h>
+#include <linux/mpls.h>
+#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
-/* copy saddr & daddr, possibly using 64bit load/store
- * Equivalent to : flow->src = iph->saddr;
- * flow->dst = iph->daddr;
- */
-static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
+static bool skb_flow_dissector_uses_key(struct flow_dissector *flow_dissector,
+ enum flow_dissector_key_id key_id)
+{
+ return flow_dissector->used_keys & (1 << key_id);
+}
+
+static void skb_flow_dissector_set_key(struct flow_dissector *flow_dissector,
+ enum flow_dissector_key_id key_id)
+{
+ flow_dissector->used_keys |= (1 << key_id);
+}
+
+static void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
+ enum flow_dissector_key_id key_id,
+ void *target_container)
+{
+ return ((char *) target_container) + flow_dissector->offset[key_id];
+}
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+ const struct flow_dissector_key *key,
+ unsigned int key_count)
{
- BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
- offsetof(typeof(*flow), src) + sizeof(flow->src));
- memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
+ unsigned int i;
+
+ memset(flow_dissector, 0, sizeof(*flow_dissector));
+
+ for (i = 0; i < key_count; i++, key++) {
+ /* User should make sure that every key target offset is within
+ * boundaries of unsigned short.
+ */
+ BUG_ON(key->offset > USHRT_MAX);
+ BUG_ON(skb_flow_dissector_uses_key(flow_dissector,
+ key->key_id));
+
+ skb_flow_dissector_set_key(flow_dissector, key->key_id);
+ flow_dissector->offset[key->key_id] = key->offset;
+ }
+
+ /* Ensure that the dissector always includes control and basic key.
+ * That way we are able to avoid handling lack of these in fast path.
+ */
+ BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_CONTROL));
+ BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_BASIC));
}
+EXPORT_SYMBOL(skb_flow_dissector_init);
/**
* __skb_flow_get_ports - extract the upper layer ports and return them
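
skb_flow_dissector_init() binds each requested key to an offset inside a caller-owned container, so dissection writes results directly into the caller's structure; the BUG_ON checks make the control and basic keys mandatory and require every offset to fit in an unsigned short. A hypothetical initialization following that pattern:

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <net/flow_dissector.h>

/* caller-owned target container; the dissector fills it in place */
struct my_flow {
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_addrs addrs;
};

static const struct flow_dissector_key my_keys[] = {
	{ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
	  .offset = offsetof(struct my_flow, control) },
	{ .key_id = FLOW_DISSECTOR_KEY_BASIC,
	  .offset = offsetof(struct my_flow, basic) },
	{ .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
	  .offset = offsetof(struct my_flow, addrs) },
};

static struct flow_dissector my_dissector;

/* at init time; remember to zero a struct my_flow before dissecting */
static void my_dissector_setup(void)
{
	skb_flow_dissector_init(&my_dissector, my_keys,
				ARRAY_SIZE(my_keys));
}
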
@@ -63,17 +105,30 @@ EXPORT_SYMBOL(__skb_flow_get_ports);
/**
* __skb_flow_dissect - extract the flow_keys struct and return it
* @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
+ * @flow_dissector: list of keys to dissect
+ * @target_container: target structure to put dissected values into
* @data: raw buffer pointer to the packet, if NULL use skb->data
* @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
* @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
* @hlen: packet header length, if @data is NULL use skb_headlen(skb)
*
- * The function will try to retrieve the struct flow_keys from either the skbuff
- * or a raw buffer specified by the rest parameters
+ * The function will try to retrieve individual keys into target specified
+ * by flow_dissector from either the skbuff or a raw buffer specified by the
+ * rest parameters.
+ *
+ * Caller must take care of zeroing target container memory.
*/
-bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
+bool __skb_flow_dissect(const struct sk_buff *skb,
+ struct flow_dissector *flow_dissector,
+ void *target_container,
void *data, __be16 proto, int nhoff, int hlen)
{
+ struct flow_dissector_key_control *key_control;
+ struct flow_dissector_key_basic *key_basic;
+ struct flow_dissector_key_addrs *key_addrs;
+ struct flow_dissector_key_ports *key_ports;
+ struct flow_dissector_key_tags *key_tags;
+ struct flow_dissector_key_keyid *key_keyid;
u8 ip_proto;
if (!data) {
@@ -83,7 +138,30 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
hlen = skb_headlen(skb);
}
- memset(flow, 0, sizeof(*flow));
+ /* It is ensured by skb_flow_dissector_init() that the control key
+ * will always be present.
+ */
+ key_control = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ target_container);
+
+ /* It is ensured by skb_flow_dissector_init() that the basic key
+ * will always be present.
+ */
+ key_basic = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ target_container);
+
+ if (skb_flow_dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct ethhdr *eth = eth_hdr(skb);
+ struct flow_dissector_key_eth_addrs *key_eth_addrs;
+
+ key_eth_addrs = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ target_container);
+ memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
+ }
again:
switch (proto) {
@@ -100,14 +178,15 @@ ip:
if (ip_is_fragment(iph))
ip_proto = 0;
- /* skip the address processing if skb is NULL. The assumption
- * here is that if there is no skb we are not looking for flow
- * info but lengths and protocols.
- */
- if (!skb)
+ if (!skb_flow_dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS))
break;
- iph_to_flow_copy_addrs(flow, iph);
+ key_addrs = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS, target_container);
+ memcpy(&key_addrs->v4addrs, &iph->saddr,
+ sizeof(key_addrs->v4addrs));
+ key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
break;
}
case htons(ETH_P_IPV6): {
@@ -123,25 +202,27 @@ ipv6:
ip_proto = iph->nexthdr;
nhoff += sizeof(struct ipv6hdr);
- /* see comment above in IPv4 section */
- if (!skb)
- break;
+ if (skb_flow_dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs;
+
+ key_ipv6_addrs = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ target_container);
- flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
- flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
+ memcpy(key_ipv6_addrs, &iph->saddr, sizeof(*key_ipv6_addrs));
+ key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ }
flow_label = ip6_flowlabel(iph);
if (flow_label) {
- /* Awesome, IPv6 packet has a flow label so we can
- * use that to represent the ports without any
- * further dissection.
- */
- flow->n_proto = proto;
- flow->ip_proto = ip_proto;
- flow->ports = flow_label;
- flow->thoff = (u16)nhoff;
-
- return true;
+ if (skb_flow_dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
+ key_tags = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_FLOW_LABEL,
+ target_container);
+ key_tags->flow_label = ntohl(flow_label);
+ }
}
break;
@@ -155,6 +236,15 @@ ipv6:
if (!vlan)
return false;
+ if (skb_flow_dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_VLANID)) {
+ key_tags = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_VLANID,
+ target_container);
+
+ key_tags->vlan_id = skb_vlan_tag_get_id(skb);
+ }
+
proto = vlan->h_vlan_encapsulated_proto;
nhoff += sizeof(*vlan);
goto again;
@@ -186,19 +276,58 @@ ipv6:
hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
return false;
- flow->src = hdr->srcnode;
- flow->dst = 0;
- flow->n_proto = proto;
- flow->thoff = (u16)nhoff;
+ key_basic->n_proto = proto;
+ key_control->thoff = (u16)nhoff;
+
+ if (skb_flow_dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
+ key_addrs = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_TIPC_ADDRS,
+ target_container);
+ key_addrs->tipcaddrs.srcnode = hdr->srcnode;
+ key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
+ }
+ return true;
+ }
+
+ case htons(ETH_P_MPLS_UC):
+ case htons(ETH_P_MPLS_MC): {
+ struct mpls_label *hdr, _hdr[2];
+mpls:
+ hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
+ hlen, &_hdr);
+ if (!hdr)
+ return false;
+
+ if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
+ MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
+ if (skb_flow_dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
+ key_keyid = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
+ target_container);
+ key_keyid->keyid = hdr[1].entry &
+ htonl(MPLS_LS_LABEL_MASK);
+ }
+
+ key_basic->n_proto = proto;
+ key_basic->ip_proto = ip_proto;
+ key_control->thoff = (u16)nhoff;
+
+ return true;
+ }
+
return true;
}
+
case htons(ETH_P_FCOE):
- flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
+ key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
/* fall through */
default:
return false;
}
+ip_proto_again:
switch (ip_proto) {
case IPPROTO_GRE: {
struct gre_hdr {
@@ -213,30 +342,65 @@ ipv6:
* Only look inside GRE if version zero and no
* routing
*/
- if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
- proto = hdr->proto;
+ if (hdr->flags & (GRE_VERSION | GRE_ROUTING))
+ break;
+
+ proto = hdr->proto;
+ nhoff += 4;
+ if (hdr->flags & GRE_CSUM)
nhoff += 4;
- if (hdr->flags & GRE_CSUM)
- nhoff += 4;
- if (hdr->flags & GRE_KEY)
- nhoff += 4;
- if (hdr->flags & GRE_SEQ)
- nhoff += 4;
- if (proto == htons(ETH_P_TEB)) {
- const struct ethhdr *eth;
- struct ethhdr _eth;
-
- eth = __skb_header_pointer(skb, nhoff,
- sizeof(_eth),
- data, hlen, &_eth);
- if (!eth)
- return false;
- proto = eth->h_proto;
- nhoff += sizeof(*eth);
+ if (hdr->flags & GRE_KEY) {
+ const __be32 *keyid;
+ __be32 _keyid;
+
+ keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid),
+ data, hlen, &_keyid);
+
+ if (!keyid)
+ return false;
+
+ if (skb_flow_dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_GRE_KEYID)) {
+ key_keyid = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_GRE_KEYID,
+ target_container);
+ key_keyid->keyid = *keyid;
}
- goto again;
+ nhoff += 4;
}
- break;
+ if (hdr->flags & GRE_SEQ)
+ nhoff += 4;
+ if (proto == htons(ETH_P_TEB)) {
+ const struct ethhdr *eth;
+ struct ethhdr _eth;
+
+ eth = __skb_header_pointer(skb, nhoff,
+ sizeof(_eth),
+ data, hlen, &_eth);
+ if (!eth)
+ return false;
+ proto = eth->h_proto;
+ nhoff += sizeof(*eth);
+ }
+ goto again;
+ }
+ case NEXTHDR_HOP:
+ case NEXTHDR_ROUTING:
+ case NEXTHDR_DEST: {
+ u8 _opthdr[2], *opthdr;
+
+ if (proto != htons(ETH_P_IPV6))
+ break;
+
+ opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
+ data, hlen, &_opthdr);
+ if (!opthdr)
+ return false;
+
+ ip_proto = opthdr[0];
+ nhoff += (opthdr[1] + 1) << 3;
+
+ goto ip_proto_again;
}
case IPPROTO_IPIP:
proto = htons(ETH_P_IP);
@@ -244,18 +408,25 @@ ipv6:
case IPPROTO_IPV6:
proto = htons(ETH_P_IPV6);
goto ipv6;
+ case IPPROTO_MPLS:
+ proto = htons(ETH_P_MPLS_UC);
+ goto mpls;
default:
break;
}
- flow->n_proto = proto;
- flow->ip_proto = ip_proto;
- flow->thoff = (u16) nhoff;
-
- /* unless skb is set we don't need to record port info */
- if (skb)
- flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
- data, hlen);
+ key_basic->n_proto = proto;
+ key_basic->ip_proto = ip_proto;
+ key_control->thoff = (u16)nhoff;
+
+ if (skb_flow_dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_PORTS)) {
+ key_ports = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ target_container);
+ key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
+ data, hlen);
+ }
return true;
}
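Continuing the hypothetical my_dissector sketch from above: a caller zeroes its target container (the rewritten kernel-doc makes that the caller's job) and lets __skb_flow_dissect() fill in only the keys it registered:

static void my_dissect_example(struct sk_buff *skb)
{
	struct my_keys keys;

	memset(&keys, 0, sizeof(keys));	/* caller must zero the target */
	if (__skb_flow_dissect(skb, &my_dissector, &keys, NULL, 0, 0, 0))
		pr_debug("n_proto %04x ip_proto %u thoff %u\n",
			 ntohs(keys.basic.n_proto), keys.basic.ip_proto,
			 keys.control.thoff);
}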
@@ -267,27 +438,109 @@ static __always_inline void __flow_hash_secret_init(void)
net_get_random_once(&hashrnd, sizeof(hashrnd));
}
-static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
+static __always_inline u32 __flow_hash_words(u32 *words, u32 length, u32 keyval)
{
- __flow_hash_secret_init();
- return jhash_3words(a, b, c, hashrnd);
+ return jhash2(words, length, keyval);
}
-static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
+static inline void *flow_keys_hash_start(struct flow_keys *flow)
{
- u32 hash;
+ BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
+ return (void *)flow + FLOW_KEYS_HASH_OFFSET;
+}
+
+static inline size_t flow_keys_hash_length(struct flow_keys *flow)
+{
+ size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
+ BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
+ BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
+ sizeof(*flow) - sizeof(flow->addrs));
+
+ switch (flow->control.addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ diff -= sizeof(flow->addrs.v4addrs);
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ diff -= sizeof(flow->addrs.v6addrs);
+ break;
+ case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
+ diff -= sizeof(flow->addrs.tipcaddrs);
+ break;
+ }
+ return (sizeof(*flow) - diff) / sizeof(u32);
+}
+
+__be32 flow_get_u32_src(const struct flow_keys *flow)
+{
+ switch (flow->control.addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ return flow->addrs.v4addrs.src;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ return (__force __be32)ipv6_addr_hash(
+ &flow->addrs.v6addrs.src);
+ case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
+ return flow->addrs.tipcaddrs.srcnode;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(flow_get_u32_src);
- /* get a consistent hash (same value on both flow directions) */
- if (((__force u32)keys->dst < (__force u32)keys->src) ||
- (((__force u32)keys->dst == (__force u32)keys->src) &&
- ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
- swap(keys->dst, keys->src);
- swap(keys->port16[0], keys->port16[1]);
+__be32 flow_get_u32_dst(const struct flow_keys *flow)
+{
+ switch (flow->control.addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ return flow->addrs.v4addrs.dst;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ return (__force __be32)ipv6_addr_hash(
+ &flow->addrs.v6addrs.dst);
+ default:
+ return 0;
}
+}
+EXPORT_SYMBOL(flow_get_u32_dst);
- hash = __flow_hash_3words((__force u32)keys->dst,
- (__force u32)keys->src,
- (__force u32)keys->ports);
+static inline void __flow_hash_consistentify(struct flow_keys *keys)
+{
+ int addr_diff, i;
+
+ switch (keys->control.addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ addr_diff = (__force u32)keys->addrs.v4addrs.dst -
+ (__force u32)keys->addrs.v4addrs.src;
+ if ((addr_diff < 0) ||
+ (addr_diff == 0 &&
+ ((__force u16)keys->ports.dst <
+ (__force u16)keys->ports.src))) {
+ swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
+ swap(keys->ports.src, keys->ports.dst);
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ addr_diff = memcmp(&keys->addrs.v6addrs.dst,
+ &keys->addrs.v6addrs.src,
+ sizeof(keys->addrs.v6addrs.dst));
+ if ((addr_diff < 0) ||
+ (addr_diff == 0 &&
+ ((__force u16)keys->ports.dst <
+ (__force u16)keys->ports.src))) {
+ for (i = 0; i < 4; i++)
+ swap(keys->addrs.v6addrs.src.s6_addr32[i],
+ keys->addrs.v6addrs.dst.s6_addr32[i]);
+ swap(keys->ports.src, keys->ports.dst);
+ }
+ break;
+ }
+}
+
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
+{
+ u32 hash;
+
+ __flow_hash_consistentify(keys);
+
+ hash = __flow_hash_words((u32 *)flow_keys_hash_start(keys),
+ flow_keys_hash_length(keys), keyval);
if (!hash)
hash = 1;
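The point of __flow_hash_consistentify() is that both directions of a flow hash identically; a small sketch of that property (the addresses and ports are arbitrary example values, and my_symmetry_check is hypothetical):

static void my_symmetry_check(void)
{
	struct flow_keys a = {}, b;

	a.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	a.addrs.v4addrs.src = htonl(0x0a000001);	/* 10.0.0.1 */
	a.addrs.v4addrs.dst = htonl(0x0a000002);	/* 10.0.0.2 */
	a.ports.src = htons(12345);
	a.ports.dst = htons(80);

	b = a;
	swap(b.addrs.v4addrs.src, b.addrs.v4addrs.dst);
	swap(b.ports.src, b.ports.dst);

	/* both directions canonicalize to the same key order */
	WARN_ON(flow_hash_from_keys(&a) != flow_hash_from_keys(&b));
}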
@@ -296,12 +549,52 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
u32 flow_hash_from_keys(struct flow_keys *keys)
{
- return __flow_hash_from_keys(keys);
+ __flow_hash_secret_init();
+ return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
-/*
- * __skb_get_hash: calculate a flow hash based on src/dst addresses
+static inline u32 ___skb_get_hash(const struct sk_buff *skb,
+ struct flow_keys *keys, u32 keyval)
+{
+ if (!skb_flow_dissect_flow_keys(skb, keys))
+ return 0;
+
+ return __flow_hash_from_keys(keys, keyval);
+}
+
+struct _flow_keys_digest_data {
+ __be16 n_proto;
+ u8 ip_proto;
+ u8 padding;
+ __be32 ports;
+ __be32 src;
+ __be32 dst;
+};
+
+void make_flow_keys_digest(struct flow_keys_digest *digest,
+ const struct flow_keys *flow)
+{
+ struct _flow_keys_digest_data *data =
+ (struct _flow_keys_digest_data *)digest;
+
+ BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
+
+ memset(digest, 0, sizeof(*digest));
+
+ data->n_proto = flow->basic.n_proto;
+ data->ip_proto = flow->basic.ip_proto;
+ data->ports = flow->ports.ports;
+ data->src = flow->addrs.v4addrs.src;
+ data->dst = flow->addrs.v4addrs.dst;
+}
+EXPORT_SYMBOL(make_flow_keys_digest);
+
+/**
+ * __skb_get_hash: calculate a flow hash
+ * @skb: sk_buff to calculate flow hash from
+ *
+ * This function calculates a flow hash based on src/dst addresses
* and src/dst port numbers. Sets hash in skb to non-zero hash value
* on success, zero indicates no valid hash. Also, sets l4_hash in skb
* if hash is a canonical 4-tuple hash over transport ports.
@@ -309,53 +602,34 @@ EXPORT_SYMBOL(flow_hash_from_keys);
void __skb_get_hash(struct sk_buff *skb)
{
struct flow_keys keys;
+ u32 hash;
- if (!skb_flow_dissect(skb, &keys))
- return;
+ __flow_hash_secret_init();
- if (keys.ports)
+ hash = ___skb_get_hash(skb, &keys, hashrnd);
+ if (!hash)
+ return;
+ if (keys.ports.ports)
skb->l4_hash = 1;
-
skb->sw_hash = 1;
-
- skb->hash = __flow_hash_from_keys(&keys);
+ skb->hash = hash;
}
EXPORT_SYMBOL(__skb_get_hash);
-/*
- * Returns a Tx hash based on the given packet descriptor a Tx queues' number
- * to be used as a distribution range.
- */
-u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
- unsigned int num_tx_queues)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
- u32 hash;
- u16 qoffset = 0;
- u16 qcount = num_tx_queues;
-
- if (skb_rx_queue_recorded(skb)) {
- hash = skb_get_rx_queue(skb);
- while (unlikely(hash >= num_tx_queues))
- hash -= num_tx_queues;
- return hash;
- }
-
- if (dev->num_tc) {
- u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
- qoffset = dev->tc_to_txq[tc].offset;
- qcount = dev->tc_to_txq[tc].count;
- }
+ struct flow_keys keys;
- return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
+ return ___skb_get_hash(skb, &keys, perturb);
}
-EXPORT_SYMBOL(__skb_tx_hash);
+EXPORT_SYMBOL(skb_get_hash_perturb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
const struct flow_keys *keys, int hlen)
{
- u32 poff = keys->thoff;
+ u32 poff = keys->control.thoff;
- switch (keys->ip_proto) {
+ switch (keys->basic.ip_proto) {
case IPPROTO_TCP: {
/* access doff as u8 to avoid unaligned access */
const u8 *doff;
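The skb_get_hash_perturb() helper added above lets callers key the hash per-instance instead of with the global hashrnd; a hedged sketch of the intended consumer pattern (my_classify and its parameters are hypothetical):

static unsigned int my_classify(const struct sk_buff *skb,
				u32 perturbation, unsigned int buckets)
{
	/* perturbation is private to the caller, e.g. a qdisc */
	return reciprocal_scale(skb_get_hash_perturb(skb, perturbation),
				buckets);
}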
@@ -396,8 +670,12 @@ u32 __skb_get_poff(const struct sk_buff *skb, void *data,
return poff;
}
-/* skb_get_poff() returns the offset to the payload as far as it could
- * be dissected. The main user is currently BPF, so that we can dynamically
+/**
+ * skb_get_poff - get the offset to the payload
+ * @skb: sk_buff to get the payload offset from
+ *
+ * The function will get the offset to the payload as far as it could
+ * be dissected. The main user is currently BPF, so that we can dynamically
* truncate packets without needing to push actual payload to the user
* space and can analyze headers only, instead.
*/
@@ -405,86 +683,76 @@ u32 skb_get_poff(const struct sk_buff *skb)
{
struct flow_keys keys;
- if (!skb_flow_dissect(skb, &keys))
+ if (!skb_flow_dissect_flow_keys(skb, &keys))
return 0;
return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}
-static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+static const struct flow_dissector_key flow_keys_dissector_keys[] = {
+ {
+ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+ .offset = offsetof(struct flow_keys, control),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_BASIC,
+ .offset = offsetof(struct flow_keys, basic),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v4addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.v6addrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
+ .offset = offsetof(struct flow_keys, addrs.tipcaddrs),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_PORTS,
+ .offset = offsetof(struct flow_keys, ports),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_VLANID,
+ .offset = offsetof(struct flow_keys, tags),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
+ .offset = offsetof(struct flow_keys, tags),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
+ .offset = offsetof(struct flow_keys, keyid),
+ },
+};
+
+static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
+ {
+ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+ .offset = offsetof(struct flow_keys, control),
+ },
+ {
+ .key_id = FLOW_DISSECTOR_KEY_BASIC,
+ .offset = offsetof(struct flow_keys, basic),
+ },
+};
+
+struct flow_dissector flow_keys_dissector __read_mostly;
+EXPORT_SYMBOL(flow_keys_dissector);
+
+struct flow_dissector flow_keys_buf_dissector __read_mostly;
+
+static int __init init_default_flow_dissectors(void)
{
-#ifdef CONFIG_XPS
- struct xps_dev_maps *dev_maps;
- struct xps_map *map;
- int queue_index = -1;
-
- rcu_read_lock();
- dev_maps = rcu_dereference(dev->xps_maps);
- if (dev_maps) {
- map = rcu_dereference(
- dev_maps->cpu_map[skb->sender_cpu - 1]);
- if (map) {
- if (map->len == 1)
- queue_index = map->queues[0];
- else
- queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
- map->len)];
- if (unlikely(queue_index >= dev->real_num_tx_queues))
- queue_index = -1;
- }
- }
- rcu_read_unlock();
-
- return queue_index;
-#else
- return -1;
-#endif
-}
-
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
- int queue_index = sk_tx_queue_get(sk);
-
- if (queue_index < 0 || skb->ooo_okay ||
- queue_index >= dev->real_num_tx_queues) {
- int new_index = get_xps_queue(dev, skb);
- if (new_index < 0)
- new_index = skb_tx_hash(dev, skb);
-
- if (queue_index != new_index && sk &&
- rcu_access_pointer(sk->sk_dst_cache))
- sk_tx_queue_set(sk, new_index);
-
- queue_index = new_index;
- }
-
- return queue_index;
+ skb_flow_dissector_init(&flow_keys_dissector,
+ flow_keys_dissector_keys,
+ ARRAY_SIZE(flow_keys_dissector_keys));
+ skb_flow_dissector_init(&flow_keys_buf_dissector,
+ flow_keys_buf_dissector_keys,
+ ARRAY_SIZE(flow_keys_buf_dissector_keys));
+ return 0;
}
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb,
- void *accel_priv)
-{
- int queue_index = 0;
-
-#ifdef CONFIG_XPS
- if (skb->sender_cpu == 0)
- skb->sender_cpu = raw_smp_processor_id() + 1;
-#endif
-
- if (dev->real_num_tx_queues != 1) {
- const struct net_device_ops *ops = dev->netdev_ops;
- if (ops->ndo_select_queue)
- queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
- __netdev_pick_tx);
- else
- queue_index = __netdev_pick_tx(dev, skb);
-
- if (!accel_priv)
- queue_index = netdev_cap_txqueue(dev, queue_index);
- }
-
- skb_set_queue_mapping(skb, queue_index);
- return netdev_get_tx_queue(dev, queue_index);
-}
+late_initcall_sync(init_default_flow_dissectors);
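With the default dissectors registered at late_initcall time, the legacy entry points used throughout this patch become thin wrappers; a sketch of how skb_flow_dissect_flow_keys() presumably looks on the header side (note that it zeroes the target, satisfying the new kernel-doc contract):

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
				  NULL, 0, 0, 0);
}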
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 3de654256028..3a74df750af4 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -913,6 +913,7 @@ static void neigh_timer_handler(unsigned long arg)
neigh->nud_state = NUD_PROBE;
neigh->updated = jiffies;
atomic_set(&neigh->probes, 0);
+ notify = 1;
next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
}
} else {
@@ -1144,6 +1145,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
if (new != old) {
neigh_del_timer(neigh);
+ if (new & NUD_PROBE)
+ atomic_set(&neigh->probes, 0);
if (new & NUD_IN_TIMER)
neigh_add_timer(neigh, (jiffies +
((new & NUD_REACHABLE) ?
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4238d6da5c60..18b34d771ed4 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -458,11 +458,15 @@ static ssize_t phys_switch_id_show(struct device *dev,
return restart_syscall();
if (dev_isalive(netdev)) {
- struct netdev_phys_item_id ppid;
+ struct switchdev_attr attr = {
+ .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+ .flags = SWITCHDEV_F_NO_RECURSE,
+ };
- ret = netdev_switch_parent_id_get(netdev, &ppid);
+ ret = switchdev_port_attr_get(netdev, &attr);
if (!ret)
- ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
+ ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
+ attr.u.ppid.id);
}
rtnl_unlock();
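The same attribute-based pattern replaces every former netdev_switch_parent_id_get() caller in this series; in outline, a reader of a port's parent id now does (my_print_parent_id is a hypothetical wrapper):

static void my_print_parent_id(struct net_device *netdev)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,	/* this port only */
	};

	if (!switchdev_port_attr_get(netdev, &attr))
		pr_info("parent id: %*phN\n", attr.u.ppid.id_len,
			attr.u.ppid.id);
}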
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 572af0011997..2c2eb1b629b1 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -147,24 +147,17 @@ static void ops_free_list(const struct pernet_operations *ops,
}
}
-static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
- int id);
+/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
- int min = 0, max = 0, id;
-
- ASSERT_RTNL();
+ int min = 0, max = 0;
if (reqid >= 0) {
min = reqid;
max = reqid + 1;
}
- id = idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
- if (id >= 0)
- rtnl_net_notifyid(net, peer, RTM_NEWNSID, id);
-
- return id;
+ return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}
/* This function is used by idr_for_each(). If net is equal to peer, the
@@ -180,11 +173,16 @@ static int net_eq_idr(int id, void *net, void *peer)
return 0;
}
-static int __peernet2id(struct net *net, struct net *peer, bool alloc)
+/* Should be called with nsid_lock held. If a new id is assigned, the bool
+ * pointed to by alloc is set to true, so the caller knows that the new id
+ * must be notified via rtnl.
+ */
+static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
+ bool alloc_it = *alloc;
- ASSERT_RTNL();
+ *alloc = false;
/* Magic value for id 0. */
if (id == NET_ID_ZERO)
@@ -192,36 +190,77 @@ static int __peernet2id(struct net *net, struct net *peer, bool alloc)
if (id > 0)
return id;
- if (alloc)
- return alloc_netid(net, peer, -1);
+ if (alloc_it) {
+ id = alloc_netid(net, peer, -1);
+ *alloc = true;
+ return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
+ }
+
+ return NETNSA_NSID_NOT_ASSIGNED;
+}
+
+/* should be called with nsid_lock held */
+static int __peernet2id(struct net *net, struct net *peer)
+{
+ bool no = false;
- return -ENOENT;
+ return __peernet2id_alloc(net, peer, &no);
}
+static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
* be allocated and returned.
*/
+int peernet2id_alloc(struct net *net, struct net *peer)
+{
+ unsigned long flags;
+ bool alloc;
+ int id;
+
+ spin_lock_irqsave(&net->nsid_lock, flags);
+ alloc = atomic_read(&peer->count) == 0 ? false : true;
+ id = __peernet2id_alloc(net, peer, &alloc);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
+ if (alloc && id >= 0)
+ rtnl_net_notifyid(net, RTM_NEWNSID, id);
+ return id;
+}
+EXPORT_SYMBOL(peernet2id_alloc);
+
+/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
- bool alloc = atomic_read(&peer->count) == 0 ? false : true;
+ unsigned long flags;
int id;
- id = __peernet2id(net, peer, alloc);
- return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
+ spin_lock_irqsave(&net->nsid_lock, flags);
+ id = __peernet2id(net, peer);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
+ return id;
+}
+
+/* This function returns true if the peer netns has an id assigned in the
+ * current netns.
+ */
+bool peernet_has_id(struct net *net, struct net *peer)
+{
+ return peernet2id(net, peer) >= 0;
}
-EXPORT_SYMBOL(peernet2id);
struct net *get_net_ns_by_id(struct net *net, int id)
{
+ unsigned long flags;
struct net *peer;
if (id < 0)
return NULL;
rcu_read_lock();
+ spin_lock_irqsave(&net->nsid_lock, flags);
peer = idr_find(&net->netns_ids, id);
if (peer)
get_net(peer);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
rcu_read_unlock();
return peer;
@@ -242,6 +281,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
net->dev_base_seq = 1;
net->user_ns = user_ns;
idr_init(&net->netns_ids);
+ spin_lock_init(&net->nsid_lock);
list_for_each_entry(ops, &pernet_list, list) {
error = ops_init(ops, net);
@@ -362,14 +402,19 @@ static void cleanup_net(struct work_struct *work)
list_del_rcu(&net->list);
list_add_tail(&net->exit_list, &net_exit_list);
for_each_net(tmp) {
- int id = __peernet2id(tmp, net, false);
+ int id;
- if (id >= 0) {
- rtnl_net_notifyid(tmp, net, RTM_DELNSID, id);
+ spin_lock_irq(&tmp->nsid_lock);
+ id = __peernet2id(tmp, net);
+ if (id >= 0)
idr_remove(&tmp->netns_ids, id);
- }
+ spin_unlock_irq(&tmp->nsid_lock);
+ if (id >= 0)
+ rtnl_net_notifyid(tmp, RTM_DELNSID, id);
}
+ spin_lock_irq(&net->nsid_lock);
idr_destroy(&net->netns_ids);
+ spin_unlock_irq(&net->nsid_lock);
}
rtnl_unlock();
@@ -497,6 +542,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
+ unsigned long flags;
struct net *peer;
int nsid, err;
@@ -517,14 +563,19 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
if (IS_ERR(peer))
return PTR_ERR(peer);
- if (__peernet2id(net, peer, false) >= 0) {
+ spin_lock_irqsave(&net->nsid_lock, flags);
+ if (__peernet2id(net, peer) >= 0) {
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
err = -EEXIST;
goto out;
}
err = alloc_netid(net, peer, nsid);
- if (err > 0)
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
+ if (err >= 0) {
+ rtnl_net_notifyid(net, RTM_NEWNSID, err);
err = 0;
+ }
out:
put_net(peer);
return err;
@@ -538,14 +589,10 @@ static int rtnl_net_get_size(void)
}
static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
- int cmd, struct net *net, struct net *peer,
- int nsid)
+ int cmd, struct net *net, int nsid)
{
struct nlmsghdr *nlh;
struct rtgenmsg *rth;
- int id;
-
- ASSERT_RTNL();
nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
if (!nlh)
@@ -554,14 +601,7 @@ static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
rth = nlmsg_data(nlh);
rth->rtgen_family = AF_UNSPEC;
- if (nsid >= 0) {
- id = nsid;
- } else {
- id = __peernet2id(net, peer, false);
- if (id < 0)
- id = NETNSA_NSID_NOT_ASSIGNED;
- }
- if (nla_put_s32(skb, NETNSA_NSID, id))
+ if (nla_put_s32(skb, NETNSA_NSID, nsid))
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -578,7 +618,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
struct nlattr *tb[NETNSA_MAX + 1];
struct sk_buff *msg;
struct net *peer;
- int err;
+ int err, id;
err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
rtnl_net_policy);
@@ -600,8 +640,9 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
goto out;
}
+ id = peernet2id(net, peer);
err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
- RTM_NEWNSID, net, peer, -1);
+ RTM_NEWNSID, net, id);
if (err < 0)
goto err_out;
@@ -633,7 +674,7 @@ static int rtnl_net_dumpid_one(int id, void *peer, void *data)
ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWNSID, net_cb->net, peer, id);
+ RTM_NEWNSID, net_cb->net, id);
if (ret < 0)
return ret;
@@ -652,17 +693,17 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
.idx = 0,
.s_idx = cb->args[0],
};
+ unsigned long flags;
- ASSERT_RTNL();
-
+ spin_lock_irqsave(&net->nsid_lock, flags);
idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
cb->args[0] = net_cb.idx;
return skb->len;
}
-static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
- int id)
+static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
struct sk_buff *msg;
int err = -ENOMEM;
@@ -671,7 +712,7 @@ static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
if (!msg)
goto out;
- err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, peer, id);
+ err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
if (err < 0)
goto err_out;
diff --git a/net/core/netevent.c b/net/core/netevent.c
index f17ccd291d39..8b3bc4fac613 100644
--- a/net/core/netevent.c
+++ b/net/core/netevent.c
@@ -31,10 +31,7 @@ static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
*/
int register_netevent_notifier(struct notifier_block *nb)
{
- int err;
-
- err = atomic_notifier_chain_register(&netevent_notif_chain, nb);
- return err;
+ return atomic_notifier_chain_register(&netevent_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_netevent_notifier);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 508155b283dd..d93cbc5715f4 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -177,7 +177,7 @@
#include <asm/dma.h>
#include <asm/div64.h> /* do_div */
-#define VERSION "2.74"
+#define VERSION "2.75"
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
#define MPLS_STACK_BOTTOM htonl(0x00000100)
@@ -210,6 +210,10 @@
#define T_REMDEVALL (1<<2) /* Remove all devs */
#define T_REMDEV (1<<3) /* Remove one dev */
+/* Xmit modes */
+#define M_START_XMIT 0 /* Default normal TX */
+#define M_NETIF_RECEIVE 1 /* Inject packets into stack */
+
/* If lock -- protects updating of if_list */
#define if_lock(t) spin_lock(&(t->if_lock));
#define if_unlock(t) spin_unlock(&(t->if_lock));
@@ -251,13 +255,14 @@ struct pktgen_dev {
* we will do a random selection from within the range.
*/
__u32 flags;
- int removal_mark; /* non-zero => the device is marked for
- * removal by worker thread */
-
+ int xmit_mode;
int min_pkt_size;
int max_pkt_size;
int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */
int nfrags;
+ int removal_mark; /* non-zero => the device is marked for
+ * removal by worker thread */
+
struct page *page;
u64 delay; /* nano-seconds */
@@ -507,7 +512,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf,
pktgen_reset_all_threads(pn);
else
- pr_warn("Unknown command: %s\n", data);
+ return -EINVAL;
return count;
}
@@ -567,7 +572,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
" dst_min: %s dst_max: %s\n",
pkt_dev->dst_min, pkt_dev->dst_max);
seq_printf(seq,
- " src_min: %s src_max: %s\n",
+ " src_min: %s src_max: %s\n",
pkt_dev->src_min, pkt_dev->src_max);
}
@@ -620,6 +625,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->node >= 0)
seq_printf(seq, " node: %d\n", pkt_dev->node);
+ if (pkt_dev->xmit_mode == M_NETIF_RECEIVE)
+ seq_puts(seq, " xmit_mode: netif_receive\n");
+
seq_puts(seq, " Flags: ");
if (pkt_dev->flags & F_IPV6)
@@ -1081,7 +1089,8 @@ static ssize_t pktgen_if_write(struct file *file,
if (len < 0)
return len;
if ((value > 0) &&
- (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
+ ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
+ !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
return -ENOTSUPP;
i += len;
pkt_dev->clone_skb = value;
@@ -1134,7 +1143,7 @@ static ssize_t pktgen_if_write(struct file *file,
return len;
i += len;
- if ((value > 1) &&
+ if ((value > 1) && (pkt_dev->xmit_mode == M_START_XMIT) &&
(!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
return -ENOTSUPP;
pkt_dev->burst = value < 1 ? 1 : value;
@@ -1160,6 +1169,45 @@ static ssize_t pktgen_if_write(struct file *file,
sprintf(pg_result, "ERROR: node not possible");
return count;
}
+ if (!strcmp(name, "xmit_mode")) {
+ char f[32];
+
+ memset(f, 0, 32);
+ len = strn_len(&user_buffer[i], sizeof(f) - 1);
+ if (len < 0)
+ return len;
+
+ if (copy_from_user(f, &user_buffer[i], len))
+ return -EFAULT;
+ i += len;
+
+ if (strcmp(f, "start_xmit") == 0) {
+ pkt_dev->xmit_mode = M_START_XMIT;
+ } else if (strcmp(f, "netif_receive") == 0) {
+ /* clone_skb set earlier, not supported in this mode */
+ if (pkt_dev->clone_skb > 0)
+ return -ENOTSUPP;
+
+ pkt_dev->xmit_mode = M_NETIF_RECEIVE;
+
+ /* make sure new packet is allocated every time
+ * pktgen_xmit() is called
+ */
+ pkt_dev->last_ok = 1;
+
+ /* override clone_skb if user passed default value
+ * at module loading time
+ */
+ pkt_dev->clone_skb = 0;
+ } else {
+ sprintf(pg_result,
+ "xmit_mode -:%s:- unknown\nAvailable modes: %s",
+ f, "start_xmit, netif_receive\n");
+ return count;
+ }
+ sprintf(pg_result, "OK: xmit_mode=%s", f);
+ return count;
+ }
if (!strcmp(name, "flag")) {
char f[32];
memset(f, 0, 32);
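Usage note: like every other pktgen knob, the new xmit_mode setting is driven through the per-device procfs file, e.g. (device name hypothetical):

	echo "xmit_mode netif_receive" > /proc/net/pktgen/eth0
	echo "xmit_mode start_xmit"    > /proc/net/pktgen/eth0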
@@ -1267,6 +1315,9 @@ static ssize_t pktgen_if_write(struct file *file,
else if (strcmp(f, "NO_TIMESTAMP") == 0)
pkt_dev->flags |= F_NO_TIMESTAMP;
+ else if (strcmp(f, "!NO_TIMESTAMP") == 0)
+ pkt_dev->flags &= ~F_NO_TIMESTAMP;
+
else {
sprintf(pg_result,
"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
@@ -2594,9 +2645,9 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
int nhead = 0;
if (x) {
- int ret;
- __u8 *eth;
+ struct ethhdr *eth;
struct iphdr *iph;
+ int ret;
nhead = x->props.header_len - skb_headroom(skb);
if (nhead > 0) {
@@ -2616,9 +2667,9 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
goto err;
}
/* restore ll */
- eth = (__u8 *) skb_push(skb, ETH_HLEN);
- memcpy(eth, pkt_dev->hh, 12);
- *(u16 *) &eth[12] = protocol;
+ eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+ memcpy(eth, pkt_dev->hh, 2 * ETH_ALEN);
+ eth->h_proto = protocol;
/* Update IPv4 header len as well as checksum value */
iph = ip_hdr(skb);
@@ -3317,6 +3368,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
unsigned int burst = ACCESS_ONCE(pkt_dev->burst);
struct net_device *odev = pkt_dev->odev;
struct netdev_queue *txq;
+ struct sk_buff *skb;
int ret;
/* If device is offline, then don't send */
@@ -3354,6 +3406,37 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
if (pkt_dev->delay && pkt_dev->last_ok)
spin(pkt_dev, pkt_dev->next_tx);
+ if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
+ skb = pkt_dev->skb;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ atomic_add(burst, &skb->users);
+ local_bh_disable();
+ do {
+ ret = netif_receive_skb(skb);
+ if (ret == NET_RX_DROP)
+ pkt_dev->errors++;
+ pkt_dev->sofar++;
+ pkt_dev->seq_num++;
+ if (atomic_read(&skb->users) != burst) {
+ /* skb was queued by rps/rfs or taps,
+ * so cannot reuse this skb
+ */
+ atomic_sub(burst - 1, &skb->users);
+ /* get out of the loop and wait
+ * until skb is consumed
+ */
+ break;
+ }
+ /* skb was 'freed' by the stack, so clean a
+ * few bits and reuse it
+ */
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_verd = 0; /* reset reclass/redir ttl */
+#endif
+ } while (--burst > 0);
+ goto out; /* Skips xmit_mode M_START_XMIT */
+ }
+
txq = skb_get_tx_queue(odev, pkt_dev->skb);
local_bh_disable();
@@ -3401,6 +3484,7 @@ xmit_more:
unlock:
HARD_TX_UNLOCK(odev, txq);
+out:
local_bh_enable();
/* If pkt_dev->count is zero, then run forever */
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8de36824018d..077b6d280371 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1004,16 +1004,20 @@ static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
int err;
- struct netdev_phys_item_id psid;
+ struct switchdev_attr attr = {
+ .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+ .flags = SWITCHDEV_F_NO_RECURSE,
+ };
- err = netdev_switch_parent_id_get(dev, &psid);
+ err = switchdev_port_attr_get(dev, &attr);
if (err) {
if (err == -EOPNOTSUPP)
return 0;
return err;
}
- if (nla_put(skb, IFLA_PHYS_SWITCH_ID, psid.id_len, psid.id))
+ if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
+ attr.u.ppid.id))
return -EMSGSIZE;
return 0;
@@ -1204,7 +1208,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
if (!net_eq(dev_net(dev), link_net)) {
- int id = peernet2id(dev_net(dev), link_net);
+ int id = peernet2id_alloc(dev_net(dev), link_net);
if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
goto nla_put_failure;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 51dd3193a33e..fd3ce461fbe6 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -154,7 +154,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
net_secret_init();
memcpy(hash, saddr, 16);
for (i = 0; i < 4; i++)
- secret[i] = net_secret[i] + daddr[i];
+ secret[i] = net_secret[i] + (__force u32)daddr[i];
secret[4] = net_secret[4] +
(((__force u16)sport << 16) + (__force u16)dport);
for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 41ec02242ea7..b6a19ca0f99e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -347,94 +347,18 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
}
EXPORT_SYMBOL(build_skb);
-struct netdev_alloc_cache {
- struct page_frag frag;
- /* we maintain a pagecount bias, so that we dont dirty cache line
- * containing page->_count every time we allocate a fragment.
- */
- unsigned int pagecnt_bias;
-};
-static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
-static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
-
-static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
- gfp_t gfp_mask)
-{
- const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
- struct page *page = NULL;
- gfp_t gfp = gfp_mask;
-
- if (order) {
- gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
- __GFP_NOMEMALLOC;
- page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
- nc->frag.size = PAGE_SIZE << (page ? order : 0);
- }
-
- if (unlikely(!page))
- page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
-
- nc->frag.page = page;
-
- return page;
-}
-
-static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
- unsigned int fragsz, gfp_t gfp_mask)
-{
- struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
- struct page *page = nc->frag.page;
- unsigned int size;
- int offset;
-
- if (unlikely(!page)) {
-refill:
- page = __page_frag_refill(nc, gfp_mask);
- if (!page)
- return NULL;
-
- /* if size can vary use frag.size else just use PAGE_SIZE */
- size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
-
- /* Even if we own the page, we do not use atomic_set().
- * This would break get_page_unless_zero() users.
- */
- atomic_add(size - 1, &page->_count);
-
- /* reset page count bias and offset to start of new frag */
- nc->pagecnt_bias = size;
- nc->frag.offset = size;
- }
-
- offset = nc->frag.offset - fragsz;
- if (unlikely(offset < 0)) {
- if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
- goto refill;
-
- /* if size can vary use frag.size else just use PAGE_SIZE */
- size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
-
- /* OK, page count is 0, we can safely set it */
- atomic_set(&page->_count, size);
-
- /* reset page count bias and offset to start of new frag */
- nc->pagecnt_bias = size;
- offset = size - fragsz;
- }
-
- nc->pagecnt_bias--;
- nc->frag.offset = offset;
-
- return page_address(page) + offset;
-}
+static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
+static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache);
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
+ struct page_frag_cache *nc;
unsigned long flags;
void *data;
local_irq_save(flags);
- data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
+ nc = this_cpu_ptr(&netdev_alloc_cache);
+ data = __alloc_page_frag(nc, fragsz, gfp_mask);
local_irq_restore(flags);
return data;
}
@@ -454,7 +378,9 @@ EXPORT_SYMBOL(netdev_alloc_frag);
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
- return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
+ struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+
+ return __alloc_page_frag(nc, fragsz, gfp_mask);
}
void *napi_alloc_frag(unsigned int fragsz)
@@ -464,76 +390,70 @@ void *napi_alloc_frag(unsigned int fragsz)
EXPORT_SYMBOL(napi_alloc_frag);
/**
- * __alloc_rx_skb - allocate an skbuff for rx
+ * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ * @dev: network device to receive on
* @length: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb
- * @flags: If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
- * allocations in case we have to fallback to __alloc_skb()
- * If SKB_ALLOC_NAPI is set, page fragment will be allocated
- * from napi_cache instead of netdev_cache.
*
* Allocate a new &sk_buff and assign it a usage count of one. The
- * buffer has unspecified headroom built in. Users should allocate
+ * buffer has NET_SKB_PAD headroom built in. Users should allocate
* the headroom they think they need without accounting for the
* built in space. The built in space is used for optimisations.
*
* %NULL is returned if there is no free memory.
*/
-static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask,
- int flags)
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
+ gfp_t gfp_mask)
{
- struct sk_buff *skb = NULL;
- unsigned int fragsz = SKB_DATA_ALIGN(length) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ struct page_frag_cache *nc;
+ unsigned long flags;
+ struct sk_buff *skb;
+ bool pfmemalloc;
+ void *data;
- if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
- void *data;
+ len += NET_SKB_PAD;
- if (sk_memalloc_socks())
- gfp_mask |= __GFP_MEMALLOC;
+ if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+ (gfp_mask & (__GFP_WAIT | GFP_DMA))) {
+ skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
+ if (!skb)
+ goto skb_fail;
+ goto skb_success;
+ }
- data = (flags & SKB_ALLOC_NAPI) ?
- __napi_alloc_frag(fragsz, gfp_mask) :
- __netdev_alloc_frag(fragsz, gfp_mask);
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ len = SKB_DATA_ALIGN(len);
- if (likely(data)) {
- skb = build_skb(data, fragsz);
- if (unlikely(!skb))
- put_page(virt_to_head_page(data));
- }
- } else {
- skb = __alloc_skb(length, gfp_mask,
- SKB_ALLOC_RX, NUMA_NO_NODE);
- }
- return skb;
-}
+ if (sk_memalloc_socks())
+ gfp_mask |= __GFP_MEMALLOC;
-/**
- * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
- * @dev: network device to receive on
- * @length: length to allocate
- * @gfp_mask: get_free_pages mask, passed to alloc_skb
- *
- * Allocate a new &sk_buff and assign it a usage count of one. The
- * buffer has NET_SKB_PAD headroom built in. Users should allocate
- * the headroom they think they need without accounting for the
- * built in space. The built in space is used for optimisations.
- *
- * %NULL is returned if there is no free memory.
- */
-struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
- unsigned int length, gfp_t gfp_mask)
-{
- struct sk_buff *skb;
+ local_irq_save(flags);
+
+ nc = this_cpu_ptr(&netdev_alloc_cache);
+ data = __alloc_page_frag(nc, len, gfp_mask);
+ pfmemalloc = nc->pfmemalloc;
- length += NET_SKB_PAD;
- skb = __alloc_rx_skb(length, gfp_mask, 0);
+ local_irq_restore(flags);
- if (likely(skb)) {
- skb_reserve(skb, NET_SKB_PAD);
- skb->dev = dev;
+ if (unlikely(!data))
+ return NULL;
+
+ skb = __build_skb(data, len);
+ if (unlikely(!skb)) {
+ skb_free_frag(data);
+ return NULL;
}
+ /* use OR instead of assignment to avoid clearing of bits in mask */
+ if (pfmemalloc)
+ skb->pfmemalloc = 1;
+ skb->head_frag = 1;
+
+skb_success:
+ skb_reserve(skb, NET_SKB_PAD);
+ skb->dev = dev;
+
+skb_fail:
return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
@@ -551,19 +471,49 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
*
* %NULL is returned if there is no free memory.
*/
-struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
- unsigned int length, gfp_t gfp_mask)
+struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
+ gfp_t gfp_mask)
{
+ struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
struct sk_buff *skb;
+ void *data;
- length += NET_SKB_PAD + NET_IP_ALIGN;
- skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI);
+ len += NET_SKB_PAD + NET_IP_ALIGN;
- if (likely(skb)) {
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- skb->dev = napi->dev;
+ if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+ (gfp_mask & (__GFP_WAIT | GFP_DMA))) {
+ skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
+ if (!skb)
+ goto skb_fail;
+ goto skb_success;
}
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ len = SKB_DATA_ALIGN(len);
+
+ if (sk_memalloc_socks())
+ gfp_mask |= __GFP_MEMALLOC;
+
+ data = __alloc_page_frag(nc, len, gfp_mask);
+ if (unlikely(!data))
+ return NULL;
+
+ skb = __build_skb(data, len);
+ if (unlikely(!skb)) {
+ skb_free_frag(data);
+ return NULL;
+ }
+
+ /* use OR instead of assignment to avoid clearing of bits in mask */
+ if (nc->pfmemalloc)
+ skb->pfmemalloc = 1;
+ skb->head_frag = 1;
+
+skb_success:
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb->dev = napi->dev;
+
+skb_fail:
return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
@@ -611,10 +561,12 @@ static void skb_clone_fraglist(struct sk_buff *skb)
static void skb_free_head(struct sk_buff *skb)
{
+ unsigned char *head = skb->head;
+
if (skb->head_frag)
- put_page(virt_to_head_page(skb->head));
+ skb_free_frag(head);
else
- kfree(skb->head);
+ kfree(head);
}
static void skb_release_data(struct sk_buff *skb)
@@ -1918,15 +1870,39 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
return false;
}
+ssize_t skb_socket_splice(struct sock *sk,
+ struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd)
+{
+ int ret;
+
+ /* Drop the socket lock, otherwise we have reverse
+ * locking dependencies between sk_lock and i_mutex
+ * here as compared to sendfile(). We enter here
+ * with the socket lock held, and splice_to_pipe() will
+ * grab the pipe inode lock. For sendfile() emulation,
+ * we call into ->sendpage() with the i_mutex lock held
+ * and networking will grab the socket lock.
+ */
+ release_sock(sk);
+ ret = splice_to_pipe(pipe, spd);
+ lock_sock(sk);
+
+ return ret;
+}
+
/*
* Map data from the skb to a pipe. Should handle both the linear part,
* the fragments, and the frag list. It does NOT handle frag lists within
* the frag list, if such a thing exists. We'd probably need to recurse to
* handle that cleanly.
*/
-int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int tlen,
- unsigned int flags)
+ unsigned int flags,
+ ssize_t (*splice_cb)(struct sock *,
+ struct pipe_inode_info *,
+ struct splice_pipe_desc *))
{
struct partial_page partial[MAX_SKB_FRAGS];
struct page *pages[MAX_SKB_FRAGS];
@@ -1939,7 +1915,6 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
.spd_release = sock_spd_release,
};
struct sk_buff *frag_iter;
- struct sock *sk = skb->sk;
int ret = 0;
/*
@@ -1962,23 +1937,12 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
}
done:
- if (spd.nr_pages) {
- /*
- * Drop the socket lock, otherwise we have reverse
- * locking dependencies between sk_lock and i_mutex
- * here as compared to sendfile(). We enter here
- * with the socket lock held, and splice_to_pipe() will
- * grab the pipe inode lock. For sendfile() emulation,
- * we call into ->sendpage() with the i_mutex lock held
- * and networking will grab the socket lock.
- */
- release_sock(sk);
- ret = splice_to_pipe(pipe, &spd);
- lock_sock(sk);
- }
+ if (spd.nr_pages)
+ ret = splice_cb(sk, pipe, &spd);
return ret;
}
+EXPORT_SYMBOL_GPL(skb_splice_bits);
/**
* skb_store_bits - store bits from kernel buffer to skb
@@ -2963,6 +2927,24 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
}
EXPORT_SYMBOL(skb_append_datato_frags);
+int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
+ int offset, size_t size)
+{
+ int i = skb_shinfo(skb)->nr_frags;
+
+ if (skb_can_coalesce(skb, i, page, offset)) {
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
+ } else if (i < MAX_SKB_FRAGS) {
+ get_page(page);
+ skb_fill_page_desc(skb, i, page, offset, size);
+ } else {
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(skb_append_pagefrags);
+
/**
* skb_pull_rcsum - pull skb and update receive checksum
* @skb: buffer to update
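A hedged sketch of the calling convention for the skb_append_pagefrags() helper added above (my_queue_page is hypothetical): the helper only fills the frag descriptor and takes a page reference, so the caller stays responsible for the skb length accounting:

static int my_queue_page(struct sk_buff *skb, struct page *page,
			 int offset, size_t size)
{
	int err = skb_append_pagefrags(skb, page, offset, size);

	if (err)
		return err;	/* -EMSGSIZE: frag array full, flush first */

	skb->len      += size;
	skb->data_len += size;
	skb->truesize += size;
	return 0;
}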
@@ -4030,6 +4012,93 @@ int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
}
EXPORT_SYMBOL(skb_checksum_setup);
+/**
+ * skb_checksum_maybe_trim - maybe trims the given skb
+ * @skb: the skb to check
+ * @transport_len: the data length beyond the network header
+ *
+ * Checks whether the given skb has data beyond the given transport length.
+ * If so, returns a cloned skb trimmed to this transport length.
+ * Otherwise returns the provided skb. Returns NULL in error cases
+ * (e.g. transport_len exceeds skb length or out-of-memory).
+ *
+ * Caller needs to set the skb transport header and release the returned skb.
+ * Provided skb is consumed.
+ */
+static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
+ unsigned int transport_len)
+{
+ struct sk_buff *skb_chk;
+ unsigned int len = skb_transport_offset(skb) + transport_len;
+ int ret;
+
+ if (skb->len < len) {
+ kfree_skb(skb);
+ return NULL;
+ } else if (skb->len == len) {
+ return skb;
+ }
+
+ skb_chk = skb_clone(skb, GFP_ATOMIC);
+ kfree_skb(skb);
+
+ if (!skb_chk)
+ return NULL;
+
+ ret = pskb_trim_rcsum(skb_chk, len);
+ if (ret) {
+ kfree_skb(skb_chk);
+ return NULL;
+ }
+
+ return skb_chk;
+}
+
+/**
+ * skb_checksum_trimmed - validate checksum of an skb
+ * @skb: the skb to check
+ * @transport_len: the data length beyond the network header
+ * @skb_chkf: checksum function to use
+ *
+ * Applies the given checksum function skb_chkf to the provided skb.
+ * Returns a checked and maybe trimmed skb. Returns NULL on error.
+ *
+ * If the skb has data beyond the given transport length, then a
+ * trimmed & cloned skb is checked and returned.
+ *
+ * Caller needs to set the skb transport header and release the returned skb.
+ * Provided skb is consumed.
+ */
+struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
+ unsigned int transport_len,
+ __sum16(*skb_chkf)(struct sk_buff *skb))
+{
+ struct sk_buff *skb_chk;
+ unsigned int offset = skb_transport_offset(skb);
+ __sum16 ret;
+
+ skb_chk = skb_checksum_maybe_trim(skb, transport_len);
+ if (!skb_chk)
+ return NULL;
+
+ if (!pskb_may_pull(skb_chk, offset)) {
+ kfree_skb(skb_chk);
+ return NULL;
+ }
+
+ __skb_pull(skb_chk, offset);
+ ret = skb_chkf(skb_chk);
+ __skb_push(skb_chk, offset);
+
+ if (ret) {
+ kfree_skb(skb_chk);
+ return NULL;
+ }
+
+ return skb_chk;
+}
+EXPORT_SYMBOL(skb_checksum_trimmed);
+
void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
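A sketch of a checksum callback suitable for skb_checksum_trimmed(), assuming a protocol whose checksum the generic helper can validate (my_csum_check is hypothetical):

static __sum16 my_csum_check(struct sk_buff *skb)
{
	return skb_checksum_simple_validate(skb);
}

/* typical call site, after setting the transport header: */
/* skb = skb_checksum_trimmed(skb, transport_len, my_csum_check); */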
diff --git a/net/core/sock.c b/net/core/sock.c
index dc30dc5bb1b8..7063c329c1b6 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1393,9 +1393,10 @@ EXPORT_SYMBOL_GPL(sock_update_netprioidx);
* @family: protocol family
* @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
* @prot: struct proto associated with this new sock instance
+ * @kern: is this to be a kernel socket?
*/
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
- struct proto *prot)
+ struct proto *prot, int kern)
{
struct sock *sk;
@@ -1408,7 +1409,10 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
*/
sk->sk_prot = sk->sk_prot_creator = prot;
sock_lock_init(sk);
- sock_net_set(sk, get_net(net));
+ sk->sk_net_refcnt = kern ? 0 : 1;
+ if (likely(sk->sk_net_refcnt))
+ get_net(net);
+ sock_net_set(sk, net);
atomic_set(&sk->sk_wmem_alloc, 1);
sock_update_classid(sk);
@@ -1442,7 +1446,8 @@ static void __sk_free(struct sock *sk)
if (sk->sk_peer_cred)
put_cred(sk->sk_peer_cred);
put_pid(sk->sk_peer_pid);
- put_net(sock_net(sk));
+ if (likely(sk->sk_net_refcnt))
+ put_net(sock_net(sk));
sk_prot_free(sk->sk_prot_creator, sk);
}
@@ -1458,25 +1463,6 @@ void sk_free(struct sock *sk)
}
EXPORT_SYMBOL(sk_free);
-/*
- * Last sock_put should drop reference to sk->sk_net. It has already
- * been dropped in sk_change_net. Taking reference to stopping namespace
- * is not an option.
- * Take reference to a socket to remove it from hash _alive_ and after that
- * destroy it in the context of init_net.
- */
-void sk_release_kernel(struct sock *sk)
-{
- if (sk == NULL || sk->sk_socket == NULL)
- return;
-
- sock_hold(sk);
- sock_release(sk->sk_socket);
- sock_net_set(sk, get_net(&init_net));
- sock_put(sk);
-}
-EXPORT_SYMBOL(sk_release_kernel);
-
static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
@@ -1592,6 +1578,8 @@ EXPORT_SYMBOL_GPL(sk_clone_lock);
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
+ u32 max_segs = 1;
+
__sk_dst_set(sk, dst);
sk->sk_route_caps = dst->dev->features;
if (sk->sk_route_caps & NETIF_F_GSO)
@@ -1603,9 +1591,10 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
} else {
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
sk->sk_gso_max_size = dst->dev->gso_max_size;
- sk->sk_gso_max_segs = dst->dev->gso_max_segs;
+ max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
}
}
+ sk->sk_gso_max_segs = max_segs;
}
EXPORT_SYMBOL_GPL(sk_setup_caps);
@@ -2080,12 +2069,13 @@ EXPORT_SYMBOL(__sk_mem_schedule);
/**
* __sk_reclaim - reclaim memory_allocated
* @sk: socket
+ * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
*/
-void __sk_mem_reclaim(struct sock *sk)
+void __sk_mem_reclaim(struct sock *sk, int amount)
{
- sk_memory_allocated_sub(sk,
- sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
- sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
+ amount >>= SK_MEM_QUANTUM_SHIFT;
+ sk_memory_allocated_sub(sk, amount);
+ sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
if (sk_under_memory_pressure(sk) &&
(sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
diff --git a/net/core/stream.c b/net/core/stream.c
index 301c05f26060..d70f77a0c889 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -119,6 +119,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
int err = 0;
long vm_wait = 0;
long current_timeo = *timeo_p;
+ bool noblock = (*timeo_p ? false : true);
DEFINE_WAIT(wait);
if (sk_stream_memory_free(sk))
@@ -131,8 +132,11 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto do_error;
- if (!*timeo_p)
+ if (!*timeo_p) {
+ if (noblock)
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
goto do_nonblock;
+ }
if (signal_pending(current))
goto do_interrupted;
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
diff --git a/net/core/utils.c b/net/core/utils.c
index 7b803884c162..a7732a068043 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -304,13 +304,15 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, int pseudohdr)
{
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from),
- to));
+ csum_replace4(sum, from, to);
if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
- skb->csum = ~csum_add(csum_sub(~(skb->csum), from), to);
+ skb->csum = ~csum_add(csum_sub(~(skb->csum),
+ (__force __wsum)from),
+ (__force __wsum)to);
} else if (pseudohdr)
- *sum = ~csum_fold(csum_add(csum_sub(csum_unfold(*sum), from),
- to));
+ *sum = ~csum_fold(csum_add(csum_sub(csum_unfold(*sum),
+ (__force __wsum)from),
+ (__force __wsum)to));
}
EXPORT_SYMBOL(inet_proto_csum_replace4);
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 754484b3cd0e..675cf94e04f8 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -468,10 +468,10 @@ static struct proto dn_proto = {
.obj_size = sizeof(struct dn_sock),
};
-static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
+static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp, int kern)
{
struct dn_scp *scp;
- struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto);
+ struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, kern);
if (!sk)
goto out;
@@ -693,7 +693,7 @@ static int dn_create(struct net *net, struct socket *sock, int protocol,
}
- if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL)
+ if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL, kern)) == NULL)
return -ENOBUFS;
sk->sk_protocol = protocol;
@@ -1096,7 +1096,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
cb = DN_SKB_CB(skb);
sk->sk_ack_backlog--;
- newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation);
+ newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0);
if (newsk == NULL) {
release_sock(sk);
kfree_skb(skb);
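The kern argument added here threads through to sk_alloc() so kernel-internal sockets stop pinning their network namespace; in-kernel creators now pass the target net up front instead of the old create-then-sk_change_net() dance, as the inet_ctl_sock_create() hunk further down shows. Roughly, for a hypothetical in-kernel user:

    #include <linux/in.h>
    #include <linux/net.h>

    static int make_kernel_udp_sock(struct net *net, struct socket **sockp)
    {
        /* 'net' is the namespace the socket belongs to; kernel sockets
         * created this way take no reference on it. */
        return sock_create_kern(net, AF_INET, SOCK_DGRAM, IPPROTO_UDP,
                                sockp);
    }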
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 827cda560a55..04ffad311704 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -345,6 +345,24 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state)
return ret;
}
+static int dsa_slave_port_attr_set(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ int ret = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_PORT_STP_STATE:
+ if (attr->trans == SWITCHDEV_TRANS_COMMIT)
+ ret = dsa_slave_stp_update(dev, attr->u.stp_state);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
static int dsa_slave_bridge_port_join(struct net_device *dev,
struct net_device *br)
{
@@ -382,14 +400,20 @@ static int dsa_slave_bridge_port_leave(struct net_device *dev)
return ret;
}
-static int dsa_slave_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid)
+static int dsa_slave_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct dsa_switch *ds = p->parent;
- psid->id_len = sizeof(ds->index);
- memcpy(&psid->id, &ds->index, psid->id_len);
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(ds->index);
+ memcpy(&attr->u.ppid.id, &ds->index, attr->u.ppid.id_len);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
return 0;
}
@@ -675,9 +699,9 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_get_iflink = dsa_slave_get_iflink,
};
-static const struct swdev_ops dsa_slave_swdev_ops = {
- .swdev_parent_id_get = dsa_slave_parent_id_get,
- .swdev_port_stp_update = dsa_slave_stp_update,
+static const struct switchdev_ops dsa_slave_switchdev_ops = {
+ .switchdev_port_attr_get = dsa_slave_port_attr_get,
+ .switchdev_port_attr_set = dsa_slave_port_attr_set,
};
static void dsa_slave_adjust_link(struct net_device *dev)
@@ -810,12 +834,19 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
return 0;
}
+static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
+static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock,
+ &dsa_slave_netdev_xmit_lock_key);
+}
+
int dsa_slave_suspend(struct net_device *slave_dev)
{
struct dsa_slave_priv *p = netdev_priv(slave_dev);
- netif_device_detach(slave_dev);
-
if (p->phy) {
phy_stop(p->phy);
p->old_pause = -1;
@@ -859,7 +890,10 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
eth_hw_addr_inherit(slave_dev, master);
slave_dev->tx_queue_len = 0;
slave_dev->netdev_ops = &dsa_slave_netdev_ops;
- slave_dev->swdev_ops = &dsa_slave_swdev_ops;
+ slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
+
+ netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
+ NULL);
SET_NETDEV_DEV(slave_dev, parent);
slave_dev->dev.of_node = ds->pd->port_dn[port];
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f3bad41d725f..77e0f0e7a88e 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -58,6 +58,7 @@
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/dsa.h>
+#include <net/flow_dissector.h>
#include <linux/uaccess.h>
__setup("ether=", netdev_boot_setup);
@@ -130,9 +131,9 @@ u32 eth_get_headlen(void *data, unsigned int len)
return len;
/* parse any remaining L2/L3 headers, check for L4 */
- if (!__skb_flow_dissect(NULL, &keys, data,
- eth->h_proto, sizeof(*eth), len))
- return max_t(u32, keys.thoff, sizeof(*eth));
+ if (!skb_flow_dissect_flow_keys_buf(&keys, data, eth->h_proto,
+ sizeof(*eth), len))
+ return max_t(u32, keys.control.thoff, sizeof(*eth));
/* parse for any L4 headers */
return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len);
@@ -156,10 +157,11 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb_reset_mac_header(skb);
+
+ eth = (struct ethhdr *)skb->data;
skb_pull_inline(skb, ETH_HLEN);
- eth = eth_hdr(skb);
- if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+ if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
@@ -178,7 +180,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
if (unlikely(netdev_uses_dsa(dev)))
return htons(ETH_P_XDSA);
- if (likely(ntohs(eth->h_proto) >= ETH_P_802_3_MIN))
+ if (likely(eth_proto_is_802_3(eth->h_proto)))
return eth->h_proto;
/*
@@ -468,6 +470,7 @@ EXPORT_SYMBOL(eth_gro_complete);
static struct packet_offload eth_packet_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_TEB),
+ .priority = 10,
.callbacks = {
.gro_receive = eth_gro_receive,
.gro_complete = eth_gro_complete,
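eth_proto_is_802_3() replaces the open-coded ntohs() comparison above. Values below ETH_P_802_3_MIN (0x0600) in the EtherType slot are 802.3 length fields rather than protocol IDs, so only the high network-order byte decides; the kernel helper exploits that to skip a full byte swap on little-endian CPUs. A portable sketch of the same test:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define ETH_P_802_3_MIN 0x0600	/* smallest real EtherType */

    /* True if the field names a protocol (Ethernet II framing),
     * false if it is an 802.3 length. */
    static bool eth_proto_is_802_3_sketch(uint16_t proto_be)
    {
        return ntohs(proto_be) >= ETH_P_802_3_MIN;
    }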
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 0ae5822ef944..f20a387a1011 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -55,27 +55,6 @@
LIST_HEAD(lowpan_devices);
static int lowpan_open_count;
-static __le16 lowpan_get_pan_id(const struct net_device *dev)
-{
- struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
- return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
-}
-
-static __le16 lowpan_get_short_addr(const struct net_device *dev)
-{
- struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
- return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
-}
-
-static u8 lowpan_get_dsn(const struct net_device *dev)
-{
- struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
- return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
-}
-
static struct header_ops lowpan_header_ops = {
.create = lowpan_header_create,
};
@@ -103,12 +82,6 @@ static const struct net_device_ops lowpan_netdev_ops = {
.ndo_start_xmit = lowpan_xmit,
};
-static struct ieee802154_mlme_ops lowpan_mlme = {
- .get_pan_id = lowpan_get_pan_id,
- .get_short_addr = lowpan_get_short_addr,
- .get_dsn = lowpan_get_dsn,
-};
-
static void lowpan_setup(struct net_device *dev)
{
dev->addr_len = IEEE802154_ADDR_LEN;
@@ -124,7 +97,6 @@ static void lowpan_setup(struct net_device *dev)
dev->netdev_ops = &lowpan_netdev_ops;
dev->header_ops = &lowpan_header_ops;
- dev->ml_priv = &lowpan_mlme;
dev->destructor = free_netdev;
dev->features |= NETIF_F_NETNS_LOCAL;
}
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index 2349070bd534..98acf7319754 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -207,7 +207,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
/* prepare wpan address data */
sa.mode = IEEE802154_ADDR_LONG;
- sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+ sa.pan_id = lowpan_dev_info(dev)->real_dev->ieee802154_ptr->pan_id;
sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
/* intra-PAN communications */
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
index 2ee00e8a0308..b0248e934230 100644
--- a/net/ieee802154/core.c
+++ b/net/ieee802154/core.c
@@ -121,8 +121,6 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
/* atomic_inc_return makes it start at 1, make it start at 0 */
rdev->wpan_phy_idx--;
- mutex_init(&rdev->wpan_phy.pib_lock);
-
INIT_LIST_HEAD(&rdev->wpan_dev_list);
device_initialize(&rdev->wpan_phy.dev);
dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx);
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 2b4955d7aae5..3503c38954f9 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -97,8 +97,10 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
BUG_ON(!phy);
get_device(&phy->dev);
- short_addr = ops->get_short_addr(dev);
- pan_id = ops->get_pan_id(dev);
+ rtnl_lock();
+ short_addr = dev->ieee802154_ptr->short_addr;
+ pan_id = dev->ieee802154_ptr->pan_id;
+ rtnl_unlock();
if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
@@ -117,12 +119,12 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
rtnl_unlock();
if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER,
- params.transmit_power) ||
+ params.transmit_power / 100) ||
nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) ||
nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE,
params.cca.mode) ||
nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL,
- params.cca_ed_level) ||
+ params.cca_ed_level / 100) ||
nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES,
params.csma_retries) ||
nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE,
@@ -166,10 +168,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
if (!dev)
return NULL;
- /* Check on mtu is currently a hacked solution because lowpan
- * and wpan have the same ARPHRD type.
- */
- if (dev->type != ARPHRD_IEEE802154 || dev->mtu != IEEE802154_MTU) {
+ if (dev->type != ARPHRD_IEEE802154) {
dev_put(dev);
return NULL;
}
@@ -244,7 +243,9 @@ int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info)
addr.mode = IEEE802154_ADDR_LONG;
addr.extended_addr = nla_get_hwaddr(
info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]);
- addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+ rtnl_lock();
+ addr.pan_id = dev->ieee802154_ptr->pan_id;
+ rtnl_unlock();
ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
nla_get_shortaddr(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
@@ -281,7 +282,9 @@ int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info)
addr.short_addr = nla_get_shortaddr(
info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
}
- addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+ rtnl_lock();
+ addr.pan_id = dev->ieee802154_ptr->pan_id;
+ rtnl_unlock();
ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
@@ -449,11 +452,7 @@ int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
idx = 0;
for_each_netdev(net, dev) {
- /* Check on mtu is currently a hacked solution because lowpan
- * and wpan have the same ARPHRD type.
- */
- if (idx < s_idx || dev->type != ARPHRD_IEEE802154 ||
- dev->mtu != IEEE802154_MTU)
+ if (idx < s_idx || dev->type != ARPHRD_IEEE802154)
goto cont;
if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
@@ -510,7 +509,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
ops->get_mac_params(dev, &params);
if (info->attrs[IEEE802154_ATTR_TXPOWER])
- params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]);
+ params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]) * 100;
if (info->attrs[IEEE802154_ATTR_LBT_ENABLED])
params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);
@@ -519,7 +518,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL])
- params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]);
+ params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) * 100;
if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES])
params.csma_retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]);
@@ -783,11 +782,7 @@ ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
int rc;
for_each_netdev(net, dev) {
- /* Check on mtu is currently a hacked solution because lowpan
- * and wpan have the same ARPHRD type.
- */
- if (idx < first_dev || dev->type != ARPHRD_IEEE802154 ||
- dev->mtu != IEEE802154_MTU)
+ if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
goto skip;
data.ops = ieee802154_mlme_ops(dev);
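The * 100 and / 100 pairs above exist because cfg802154 now tracks transmit power and CCA ED level in mBm (1/100 dBm), while the legacy nl-mac attributes keep carrying whole dBm in small signed integers. In sketch form:

    /* legacy netlink attribute (dBm) <-> internal representation (mBm) */
    static inline int dbm_to_mbm(int dbm) { return dbm * 100; }
    static inline int mbm_to_dbm(int mbm) { return mbm / 100; }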
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 346c6665d25e..77d73014bde3 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -50,26 +50,26 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
if (!hdr)
goto out;
- mutex_lock(&phy->pib_lock);
+ rtnl_lock();
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
goto nla_put_failure;
for (i = 0; i < 32; i++) {
- if (phy->channels_supported[i])
- buf[pages++] = phy->channels_supported[i] | (i << 27);
+ if (phy->supported.channels[i])
+ buf[pages++] = phy->supported.channels[i] | (i << 27);
}
if (pages &&
nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
pages * sizeof(uint32_t), buf))
goto nla_put_failure;
- mutex_unlock(&phy->pib_lock);
+ rtnl_unlock();
kfree(buf);
genlmsg_end(msg, hdr);
return 0;
nla_put_failure:
- mutex_unlock(&phy->pib_lock);
+ rtnl_unlock();
genlmsg_cancel(msg, hdr);
out:
kfree(buf);
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index f3c12f6a4a39..7dbb1f4ce7df 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -207,10 +207,11 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
[NL802154_ATTR_PAGE] = { .type = NLA_U8, },
[NL802154_ATTR_CHANNEL] = { .type = NLA_U8, },
- [NL802154_ATTR_TX_POWER] = { .type = NLA_S8, },
+ [NL802154_ATTR_TX_POWER] = { .type = NLA_S32, },
[NL802154_ATTR_CCA_MODE] = { .type = NLA_U32, },
[NL802154_ATTR_CCA_OPT] = { .type = NLA_U32, },
+ [NL802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, },
[NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, },
@@ -225,6 +226,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
[NL802154_ATTR_MAX_FRAME_RETRIES] = { .type = NLA_S8, },
[NL802154_ATTR_LBT_MODE] = { .type = NLA_U8, },
+
+ [NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
};
/* message building helper */
@@ -236,6 +239,28 @@ static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
}
static int
+nl802154_put_flags(struct sk_buff *msg, int attr, u32 mask)
+{
+ struct nlattr *nl_flags = nla_nest_start(msg, attr);
+ int i;
+
+ if (!nl_flags)
+ return -ENOBUFS;
+
+ i = 0;
+ while (mask) {
+ if ((mask & 1) && nla_put_flag(msg, i))
+ return -ENOBUFS;
+
+ mask >>= 1;
+ i++;
+ }
+
+ nla_nest_end(msg, nl_flags);
+ return 0;
+}
+
+static int
nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
struct sk_buff *msg)
{
@@ -248,7 +273,7 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
for (page = 0; page <= IEEE802154_MAX_PAGE; page++) {
if (nla_put_u32(msg, NL802154_ATTR_SUPPORTED_CHANNEL,
- rdev->wpan_phy.channels_supported[page]))
+ rdev->wpan_phy.supported.channels[page]))
return -ENOBUFS;
}
nla_nest_end(msg, nl_page);
@@ -256,6 +281,92 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
return 0;
}
+static int
+nl802154_put_capabilities(struct sk_buff *msg,
+ struct cfg802154_registered_device *rdev)
+{
+ const struct wpan_phy_supported *caps = &rdev->wpan_phy.supported;
+ struct nlattr *nl_caps, *nl_channels;
+ int i;
+
+ nl_caps = nla_nest_start(msg, NL802154_ATTR_WPAN_PHY_CAPS);
+ if (!nl_caps)
+ return -ENOBUFS;
+
+ nl_channels = nla_nest_start(msg, NL802154_CAP_ATTR_CHANNELS);
+ if (!nl_channels)
+ return -ENOBUFS;
+
+ for (i = 0; i <= IEEE802154_MAX_PAGE; i++) {
+ if (caps->channels[i]) {
+ if (nl802154_put_flags(msg, i, caps->channels[i]))
+ return -ENOBUFS;
+ }
+ }
+
+ nla_nest_end(msg, nl_channels);
+
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
+ struct nlattr *nl_ed_lvls;
+
+ nl_ed_lvls = nla_nest_start(msg,
+ NL802154_CAP_ATTR_CCA_ED_LEVELS);
+ if (!nl_ed_lvls)
+ return -ENOBUFS;
+
+ for (i = 0; i < caps->cca_ed_levels_size; i++) {
+ if (nla_put_s32(msg, i, caps->cca_ed_levels[i]))
+ return -ENOBUFS;
+ }
+
+ nla_nest_end(msg, nl_ed_lvls);
+ }
+
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
+ struct nlattr *nl_tx_pwrs;
+
+ nl_tx_pwrs = nla_nest_start(msg, NL802154_CAP_ATTR_TX_POWERS);
+ if (!nl_tx_pwrs)
+ return -ENOBUFS;
+
+ for (i = 0; i < caps->tx_powers_size; i++) {
+ if (nla_put_s32(msg, i, caps->tx_powers[i]))
+ return -ENOBUFS;
+ }
+
+ nla_nest_end(msg, nl_tx_pwrs);
+ }
+
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
+ if (nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_MODES,
+ caps->cca_modes) ||
+ nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_OPTS,
+ caps->cca_opts))
+ return -ENOBUFS;
+ }
+
+ if (nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MINBE, caps->min_minbe) ||
+ nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MINBE, caps->max_minbe) ||
+ nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MAXBE, caps->min_maxbe) ||
+ nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MAXBE, caps->max_maxbe) ||
+ nla_put_u8(msg, NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+ caps->min_csma_backoffs) ||
+ nla_put_u8(msg, NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+ caps->max_csma_backoffs) ||
+ nla_put_s8(msg, NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+ caps->min_frame_retries) ||
+ nla_put_s8(msg, NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+ caps->max_frame_retries) ||
+ nl802154_put_flags(msg, NL802154_CAP_ATTR_IFTYPES,
+ caps->iftypes) ||
+ nla_put_u32(msg, NL802154_CAP_ATTR_LBT, caps->lbt))
+ return -ENOBUFS;
+
+ nla_nest_end(msg, nl_caps);
+
+ return 0;
+}
+
static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
enum nl802154_commands cmd,
struct sk_buff *msg, u32 portid, u32 seq,
@@ -286,23 +397,38 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
rdev->wpan_phy.current_channel))
goto nla_put_failure;
- /* supported channels array */
+ /* TODO remove this behaviour; we keep supporting it for a while
+ * so users can migrate to the new one.
+ */
if (nl802154_send_wpan_phy_channels(rdev, msg))
goto nla_put_failure;
/* cca mode */
- if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
- rdev->wpan_phy.cca.mode))
- goto nla_put_failure;
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
+ if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
+ rdev->wpan_phy.cca.mode))
+ goto nla_put_failure;
+
+ if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
+ if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
+ rdev->wpan_phy.cca.opt))
+ goto nla_put_failure;
+ }
+ }
- if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
- if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
- rdev->wpan_phy.cca.opt))
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
+ if (nla_put_s32(msg, NL802154_ATTR_TX_POWER,
+ rdev->wpan_phy.transmit_power))
goto nla_put_failure;
}
- if (nla_put_s8(msg, NL802154_ATTR_TX_POWER,
- rdev->wpan_phy.transmit_power))
+ if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
+ if (nla_put_s32(msg, NL802154_ATTR_CCA_ED_LEVEL,
+ rdev->wpan_phy.cca_ed_level))
+ goto nla_put_failure;
+ }
+
+ if (nl802154_put_capabilities(msg, rdev))
goto nla_put_failure;
finish:
@@ -575,7 +701,8 @@ static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL802154_ATTR_IFTYPE]) {
type = nla_get_u32(info->attrs[NL802154_ATTR_IFTYPE]);
- if (type > NL802154_IFTYPE_MAX)
+ if (type > NL802154_IFTYPE_MAX ||
+ !(rdev->wpan_phy.supported.iftypes & BIT(type)))
return -EINVAL;
}
@@ -625,7 +752,8 @@ static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info)
channel = nla_get_u8(info->attrs[NL802154_ATTR_CHANNEL]);
/* check 802.15.4 constraints */
- if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL)
+ if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL ||
+ !(rdev->wpan_phy.supported.channels[page] & BIT(channel)))
return -EINVAL;
return rdev_set_channel(rdev, page, channel);
@@ -636,12 +764,17 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
struct cfg802154_registered_device *rdev = info->user_ptr[0];
struct wpan_phy_cca cca;
+ if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE))
+ return -EOPNOTSUPP;
+
if (!info->attrs[NL802154_ATTR_CCA_MODE])
return -EINVAL;
cca.mode = nla_get_u32(info->attrs[NL802154_ATTR_CCA_MODE]);
/* checking 802.15.4 constraints */
- if (cca.mode < NL802154_CCA_ENERGY || cca.mode > NL802154_CCA_ATTR_MAX)
+ if (cca.mode < NL802154_CCA_ENERGY ||
+ cca.mode > NL802154_CCA_ATTR_MAX ||
+ !(rdev->wpan_phy.supported.cca_modes & BIT(cca.mode)))
return -EINVAL;
if (cca.mode == NL802154_CCA_ENERGY_CARRIER) {
@@ -649,13 +782,58 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
cca.opt = nla_get_u32(info->attrs[NL802154_ATTR_CCA_OPT]);
- if (cca.opt > NL802154_CCA_OPT_ATTR_MAX)
+ if (cca.opt > NL802154_CCA_OPT_ATTR_MAX ||
+ !(rdev->wpan_phy.supported.cca_opts & BIT(cca.opt)))
return -EINVAL;
}
return rdev_set_cca_mode(rdev, &cca);
}
+static int nl802154_set_cca_ed_level(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ s32 ed_level;
+ int i;
+
+ if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL))
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL802154_ATTR_CCA_ED_LEVEL])
+ return -EINVAL;
+
+ ed_level = nla_get_s32(info->attrs[NL802154_ATTR_CCA_ED_LEVEL]);
+
+ for (i = 0; i < rdev->wpan_phy.supported.cca_ed_levels_size; i++) {
+ if (ed_level == rdev->wpan_phy.supported.cca_ed_levels[i])
+ return rdev_set_cca_ed_level(rdev, ed_level);
+ }
+
+ return -EINVAL;
+}
+
+static int nl802154_set_tx_power(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg802154_registered_device *rdev = info->user_ptr[0];
+ s32 power;
+ int i;
+
+ if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER))
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL802154_ATTR_TX_POWER])
+ return -EINVAL;
+
+ power = nla_get_s32(info->attrs[NL802154_ATTR_TX_POWER]);
+
+ for (i = 0; i < rdev->wpan_phy.supported.tx_powers_size; i++) {
+ if (power == rdev->wpan_phy.supported.tx_powers[i])
+ return rdev_set_tx_power(rdev, power);
+ }
+
+ return -EINVAL;
+}
+
static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
{
struct cfg802154_registered_device *rdev = info->user_ptr[0];
@@ -668,14 +846,22 @@ static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
return -EBUSY;
/* don't change address fields on monitor */
- if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
- return -EINVAL;
-
- if (!info->attrs[NL802154_ATTR_PAN_ID])
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
+ !info->attrs[NL802154_ATTR_PAN_ID])
return -EINVAL;
pan_id = nla_get_le16(info->attrs[NL802154_ATTR_PAN_ID]);
+ /* TODO
+ * I am not sure we should reject the broadcast pan_id here.
+ * Broadcast is a valid setting; quoting 802.15.4:
+ * If this value is 0xffff, the device is not associated.
+ *
+ * Allowing it could be useful simply to disassociate a device.
+ */
+ if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
+ return -EINVAL;
+
return rdev_set_pan_id(rdev, wpan_dev, pan_id);
}
@@ -691,14 +877,27 @@ static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info)
return -EBUSY;
/* don't change address fields on monitor */
- if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
- return -EINVAL;
-
- if (!info->attrs[NL802154_ATTR_SHORT_ADDR])
+ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
+ !info->attrs[NL802154_ATTR_SHORT_ADDR])
return -EINVAL;
short_addr = nla_get_le16(info->attrs[NL802154_ATTR_SHORT_ADDR]);
+ /* TODO
+ * I am not sure we should reject the broadcast short_addr here.
+ * Broadcast is a valid setting; quoting 802.15.4:
+ * A value of 0xfffe indicates that the device has
+ * associated but has not been allocated an address. A
+ * value of 0xffff indicates that the device does not
+ * have a short address.
+ *
+ * I think we should allow setting these values but
+ * forbid socket communication while they are in use.
+ */
+ if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) ||
+ short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST))
+ return -EINVAL;
+
return rdev_set_short_addr(rdev, wpan_dev, short_addr);
}
@@ -722,7 +921,11 @@ nl802154_set_backoff_exponent(struct sk_buff *skb, struct genl_info *info)
max_be = nla_get_u8(info->attrs[NL802154_ATTR_MAX_BE]);
/* check 802.15.4 constraints */
- if (max_be < 3 || max_be > 8 || min_be > max_be)
+ if (min_be < rdev->wpan_phy.supported.min_minbe ||
+ min_be > rdev->wpan_phy.supported.max_minbe ||
+ max_be < rdev->wpan_phy.supported.min_maxbe ||
+ max_be > rdev->wpan_phy.supported.max_maxbe ||
+ min_be > max_be)
return -EINVAL;
return rdev_set_backoff_exponent(rdev, wpan_dev, min_be, max_be);
@@ -747,7 +950,8 @@ nl802154_set_max_csma_backoffs(struct sk_buff *skb, struct genl_info *info)
info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS]);
/* check 802.15.4 constraints */
- if (max_csma_backoffs > 5)
+ if (max_csma_backoffs < rdev->wpan_phy.supported.min_csma_backoffs ||
+ max_csma_backoffs > rdev->wpan_phy.supported.max_csma_backoffs)
return -EINVAL;
return rdev_set_max_csma_backoffs(rdev, wpan_dev, max_csma_backoffs);
@@ -771,7 +975,8 @@ nl802154_set_max_frame_retries(struct sk_buff *skb, struct genl_info *info)
info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES]);
/* check 802.15.4 constraints */
- if (max_frame_retries < -1 || max_frame_retries > 7)
+ if (max_frame_retries < rdev->wpan_phy.supported.min_frame_retries ||
+ max_frame_retries > rdev->wpan_phy.supported.max_frame_retries)
return -EINVAL;
return rdev_set_max_frame_retries(rdev, wpan_dev, max_frame_retries);
@@ -791,6 +996,9 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+ if (!wpan_phy_supported_bool(mode, rdev->wpan_phy.supported.lbt))
+ return -EINVAL;
+
return rdev_set_lbt_mode(rdev, wpan_dev, mode);
}
@@ -937,6 +1145,22 @@ static const struct genl_ops nl802154_ops[] = {
NL802154_FLAG_NEED_RTNL,
},
{
+ .cmd = NL802154_CMD_SET_CCA_ED_LEVEL,
+ .doit = nl802154_set_cca_ed_level,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL802154_CMD_SET_TX_POWER,
+ .doit = nl802154_set_tx_power,
+ .policy = nl802154_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+ NL802154_FLAG_NEED_RTNL,
+ },
+ {
.cmd = NL802154_CMD_SET_PAN_ID,
.doit = nl802154_set_pan_id,
.policy = nl802154_policy,
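Both new setters follow one shape: bail out with -EOPNOTSUPP when the phy never advertised the capability flag, then accept only values present in the advertised table. Distilled into a generic helper (names illustrative, not kernel API):

    #include <linux/errno.h>
    #include <linux/types.h>

    static int set_checked(s32 want, const s32 *table, size_t n,
                           int (*apply)(s32 val))
    {
        size_t i;

        for (i = 0; i < n; i++)
            if (table[i] == want)
                return apply(want);
        return -EINVAL;		/* value was never advertised */
    }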
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index 7b5a9dd94fe5..b2155a123f6c 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -75,6 +75,29 @@ rdev_set_cca_mode(struct cfg802154_registered_device *rdev,
}
static inline int
+rdev_set_cca_ed_level(struct cfg802154_registered_device *rdev, s32 ed_level)
+{
+ int ret;
+
+ trace_802154_rdev_set_cca_ed_level(&rdev->wpan_phy, ed_level);
+ ret = rdev->ops->set_cca_ed_level(&rdev->wpan_phy, ed_level);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int
+rdev_set_tx_power(struct cfg802154_registered_device *rdev,
+ s32 power)
+{
+ int ret;
+
+ trace_802154_rdev_set_tx_power(&rdev->wpan_phy, power);
+ ret = rdev->ops->set_tx_power(&rdev->wpan_phy, power);
+ trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+ return ret;
+}
+
+static inline int
rdev_set_pan_id(struct cfg802154_registered_device *rdev,
struct wpan_dev *wpan_dev, __le16 pan_id)
{
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index b60c65f70346..02abef2c1621 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -64,10 +64,8 @@ ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
if (tmp->type != ARPHRD_IEEE802154)
continue;
- pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp);
- short_addr =
- ieee802154_mlme_ops(tmp)->get_short_addr(tmp);
-
+ pan_id = tmp->ieee802154_ptr->pan_id;
+ short_addr = tmp->ieee802154_ptr->short_addr;
if (pan_id == addr->pan_id &&
short_addr == addr->short_addr) {
dev = tmp;
@@ -228,15 +226,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
goto out;
}
- if (dev->type != ARPHRD_IEEE802154) {
- err = -ENODEV;
- goto out_put;
- }
-
sk->sk_bound_dev_if = dev->ifindex;
sk_dst_reset(sk);
-out_put:
dev_put(dev);
out:
release_sock(sk);
@@ -286,7 +278,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
if (size > mtu) {
pr_debug("size = %Zu, mtu = %u\n", size, mtu);
- err = -EINVAL;
+ err = -EMSGSIZE;
goto out_dev;
}
@@ -797,9 +789,9 @@ static int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
/* Data frame processing */
BUG_ON(dev->type != ARPHRD_IEEE802154);
- pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
- short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
- hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
+ pan_id = dev->ieee802154_ptr->pan_id;
+ short_addr = dev->ieee802154_ptr->short_addr;
+ hw_addr = dev->ieee802154_ptr->extended_addr;
read_lock(&dgram_lock);
sk_for_each(sk, &dgram_head) {
@@ -1014,7 +1006,7 @@ static int ieee802154_create(struct net *net, struct socket *sock,
}
rc = -ENOMEM;
- sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto);
+ sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto, kern);
if (!sk)
goto out;
rc = 0;
diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h
index 5ac25eb6ed17..73eb7605c1eb 100644
--- a/net/ieee802154/trace.h
+++ b/net/ieee802154/trace.h
@@ -1,4 +1,4 @@
-/* Based on net/wireless/tracing.h */
+/* Based on net/wireless/trace.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cfg802154
@@ -93,6 +93,21 @@ TRACE_EVENT(802154_rdev_set_channel,
__entry->page, __entry->channel)
);
+TRACE_EVENT(802154_rdev_set_tx_power,
+ TP_PROTO(struct wpan_phy *wpan_phy, s32 power),
+ TP_ARGS(wpan_phy, power),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ __field(s32, power)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ __entry->power = power;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", power: %d", WPAN_PHY_PR_ARG,
+ __entry->power)
+);
+
TRACE_EVENT(802154_rdev_set_cca_mode,
TP_PROTO(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca),
TP_ARGS(wpan_phy, cca),
@@ -108,6 +123,21 @@ TRACE_EVENT(802154_rdev_set_cca_mode,
WPAN_CCA_PR_ARG)
);
+TRACE_EVENT(802154_rdev_set_cca_ed_level,
+ TP_PROTO(struct wpan_phy *wpan_phy, s32 ed_level),
+ TP_ARGS(wpan_phy, ed_level),
+ TP_STRUCT__entry(
+ WPAN_PHY_ENTRY
+ __field(s32, ed_level)
+ ),
+ TP_fast_assign(
+ WPAN_PHY_ASSIGN;
+ __entry->ed_level = ed_level;
+ ),
+ TP_printk(WPAN_PHY_PR_FMT ", ed_level: %d", WPAN_PHY_PR_ARG,
+ __entry->ed_level)
+);
+
DECLARE_EVENT_CLASS(802154_le16_template,
TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le16 le16arg),
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index bd2901604842..6fb3c90ad726 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -331,8 +331,8 @@ config NET_FOU_IP_TUNNELS
When this option is enabled IP tunnels can be configured to use
FOU or GUE encapsulation.
-config GENEVE
- tristate "Generic Network Virtualization Encapsulation (Geneve)"
+config GENEVE_CORE
+ tristate "Generic Network Virtualization Encapsulation library"
depends on INET
select NET_UDP_TUNNEL
---help---
@@ -615,6 +615,22 @@ config TCP_CONG_DCTCP
For further details see:
http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf
+config TCP_CONG_CDG
+ tristate "CAIA Delay-Gradient (CDG)"
+ default n
+ ---help---
+ CAIA Delay-Gradient (CDG) is a TCP congestion control that modifies
+ the TCP sender in order to:
+
+ o Use the delay gradient as a congestion signal.
+ o Back off with an average probability that is independent of the RTT.
+ o Coexist with flows that use loss-based congestion control.
+ o Tolerate packet loss unrelated to congestion.
+
+ For further details see:
+ D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
+ delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg
+
choice
prompt "Default TCP congestion control"
default DEFAULT_CUBIC
@@ -646,6 +662,9 @@ choice
config DEFAULT_DCTCP
bool "DCTCP" if TCP_CONG_DCTCP=y
+ config DEFAULT_CDG
+ bool "CDG" if TCP_CONG_CDG=y
+
config DEFAULT_RENO
bool "Reno"
endchoice
@@ -668,6 +687,7 @@ config DEFAULT_TCP_CONG
default "veno" if DEFAULT_VENO
default "reno" if DEFAULT_RENO
default "dctcp" if DEFAULT_DCTCP
+ default "cdg" if DEFAULT_CDG
default "cubic"
config TCP_MD5SIG
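For a feel of what the CDG help text describes, here is a rough userspace model of the delay-gradient signal from the Hayes/Armitage paper; the smoothing window and the backoff scale G are illustrative values, not the tcp_cdg.c tunables:

    #include <math.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct cdg_state {
        double rtt_min_prev, rtt_max_prev;	/* last interval's RTTs */
        double g_min, g_max;			/* smoothed gradients */
    };

    static bool cdg_should_backoff(struct cdg_state *s,
                                   double rtt_min, double rtt_max)
    {
        const double G = 3.0;	/* backoff scale, illustrative */

        /* smooth the per-interval delay gradients */
        s->g_min += ((rtt_min - s->rtt_min_prev) - s->g_min) / 8.0;
        s->g_max += ((rtt_max - s->rtt_max_prev) - s->g_max) / 8.0;
        s->rtt_min_prev = rtt_min;
        s->rtt_max_prev = rtt_max;

        /* back off with probability 1 - exp(-g/G): independent of the
         * absolute RTT, as the help text promises */
        return s->g_min > 0.0 && drand48() < 1.0 - exp(-s->g_min / G);
    }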
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 518c04ed666e..efc43f300b8c 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
+obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
obj-$(CONFIG_TCP_CONG_DCTCP) += tcp_dctcp.o
obj-$(CONFIG_TCP_CONG_WESTWOOD) += tcp_westwood.o
@@ -56,7 +57,7 @@ obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
-obj-$(CONFIG_GENEVE) += geneve.o
+obj-$(CONFIG_GENEVE_CORE) += geneve_core.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
xfrm4_output.o xfrm4_protocol.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8b47a4d79d04..cc858ef44451 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -317,7 +317,7 @@ lookup_protocol:
WARN_ON(!answer_prot->slab);
err = -ENOBUFS;
- sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
+ sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
if (!sk)
goto out;
@@ -488,7 +488,8 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
inet->inet_saddr = 0; /* Use device */
/* Make sure we are allowed to bind here. */
- if (sk->sk_prot->get_port(sk, snum)) {
+ if ((snum || !inet->bind_address_no_port) &&
+ sk->sk_prot->get_port(sk, snum)) {
inet->inet_saddr = inet->inet_rcv_saddr = 0;
err = -EADDRINUSE;
goto out_release_sock;
@@ -1430,7 +1431,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
struct net *net)
{
struct socket *sock;
- int rc = sock_create_kern(family, type, protocol, &sock);
+ int rc = sock_create_kern(net, family, type, protocol, &sock);
if (rc == 0) {
*sk = sock->sk;
@@ -1440,8 +1441,6 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
* we do not wish this socket to see incoming packets.
*/
(*sk)->sk_prot->unhash(*sk);
-
- sk_change_net(*sk, net);
}
return rc;
}
@@ -1597,7 +1596,7 @@ static __net_init int inet_init_net(struct net *net)
*/
seqlock_init(&net->ipv4.ip_local_ports.lock);
net->ipv4.ip_local_ports.range[0] = 32768;
- net->ipv4.ip_local_ports.range[1] = 61000;
+ net->ipv4.ip_local_ports.range[1] = 60999;
seqlock_init(&net->ipv4.ping_group_range.lock);
/*
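The bind_address_no_port test added above backs the new IP_BIND_ADDRESS_NO_PORT socket option: bind() pins the source address but defers port selection to connect(), so the port only has to be unique per 4-tuple. A sketch of the intended userspace pattern (error handling trimmed):

    #include <netinet/in.h>
    #include <sys/socket.h>

    static int connect_from(const struct sockaddr_in *src,
                            const struct sockaddr_in *dst)
    {
        int one = 1;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
            return -1;
        setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT,
                   &one, sizeof(one));
        /* src->sin_port is 0: address pinned, no port burned yet */
        bind(fd, (const struct sockaddr *)src, sizeof(*src));
        if (connect(fd, (const struct sockaddr *)dst, sizeof(*dst)) < 0)
            return -1;
        return fd;
    }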
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 8d695b6659c7..28ec3c1823bf 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -713,8 +713,6 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
struct hlist_head *dest;
unsigned int new_hash;
- hlist_del(&fi->fib_hash);
-
new_hash = fib_info_hashfn(fi);
dest = &new_info_hash[new_hash];
hlist_add_head(&fi->fib_hash, dest);
@@ -731,8 +729,6 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
struct hlist_head *ldest;
unsigned int new_hash;
- hlist_del(&fi->fib_lhash);
-
new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
ldest = &new_laddrhash[new_hash];
hlist_add_head(&fi->fib_lhash, ldest);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 09b62e17dd8c..3c699c4e90a4 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -72,6 +72,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
@@ -324,13 +325,15 @@ static inline void empty_child_dec(struct key_vector *n)
static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
{
- struct tnode *kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
- struct key_vector *l = kv->kv;
+ struct key_vector *l;
+ struct tnode *kv;
+ kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
if (!kv)
return NULL;
/* initialize key vector */
+ l = kv->kv;
l->key = key;
l->pos = 0;
l->bits = 0;
@@ -345,24 +348,26 @@ static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
static struct key_vector *tnode_new(t_key key, int pos, int bits)
{
- struct tnode *tnode = tnode_alloc(bits);
unsigned int shift = pos + bits;
- struct key_vector *tn = tnode->kv;
+ struct key_vector *tn;
+ struct tnode *tnode;
/* verify bits and pos their msb bits clear and values are valid */
BUG_ON(!bits || (shift > KEYLENGTH));
- pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
- sizeof(struct key_vector *) << bits);
-
+ tnode = tnode_alloc(bits);
if (!tnode)
return NULL;
+ pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
+ sizeof(struct key_vector *) << bits);
+
if (bits == KEYLENGTH)
tnode->full_children = 1;
else
tnode->empty_children = 1ul << bits;
+ tn = tnode->kv;
tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
tn->pos = pos;
tn->bits = bits;
@@ -1166,13 +1171,13 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
new_fa->fa_slen = fa->fa_slen;
new_fa->tb_id = tb->tb_id;
- err = netdev_switch_fib_ipv4_add(key, plen, fi,
- new_fa->fa_tos,
- cfg->fc_type,
- cfg->fc_nlflags,
- tb->tb_id);
+ err = switchdev_fib_ipv4_add(key, plen, fi,
+ new_fa->fa_tos,
+ cfg->fc_type,
+ cfg->fc_nlflags,
+ tb->tb_id);
if (err) {
- netdev_switch_fib_ipv4_abort(fi);
+ switchdev_fib_ipv4_abort(fi);
kmem_cache_free(fn_alias_kmem, new_fa);
goto out;
}
@@ -1216,12 +1221,10 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
new_fa->tb_id = tb->tb_id;
/* (Optionally) offload fib entry to switch hardware. */
- err = netdev_switch_fib_ipv4_add(key, plen, fi, tos,
- cfg->fc_type,
- cfg->fc_nlflags,
- tb->tb_id);
+ err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
+ cfg->fc_nlflags, tb->tb_id);
if (err) {
- netdev_switch_fib_ipv4_abort(fi);
+ switchdev_fib_ipv4_abort(fi);
goto out_free_new_fa;
}
@@ -1240,7 +1243,7 @@ succeeded:
return 0;
out_sw_fib_del:
- netdev_switch_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
+ switchdev_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
out_free_new_fa:
kmem_cache_free(fn_alias_kmem, new_fa);
out:
@@ -1518,8 +1521,8 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
if (!fa_to_delete)
return -ESRCH;
- netdev_switch_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos,
- cfg->fc_type, tb->tb_id);
+ switchdev_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos,
+ cfg->fc_type, tb->tb_id);
rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
&cfg->fc_nlinfo, 0);
@@ -1768,10 +1771,9 @@ void fib_table_flush_external(struct fib_table *tb)
if (!fi || !(fi->fib_flags & RTNH_F_OFFLOAD))
continue;
- netdev_switch_fib_ipv4_del(n->key,
- KEYLENGTH - fa->fa_slen,
- fi, fa->fa_tos,
- fa->fa_type, tb->tb_id);
+ switchdev_fib_ipv4_del(n->key, KEYLENGTH - fa->fa_slen,
+ fi, fa->fa_tos, fa->fa_type,
+ tb->tb_id);
}
/* update leaf slen */
@@ -1836,10 +1838,9 @@ int fib_table_flush(struct fib_table *tb)
continue;
}
- netdev_switch_fib_ipv4_del(n->key,
- KEYLENGTH - fa->fa_slen,
- fi, fa->fa_tos,
- fa->fa_type, tb->tb_id);
+ switchdev_fib_ipv4_del(n->key, KEYLENGTH - fa->fa_slen,
+ fi, fa->fa_tos, fa->fa_type,
+ tb->tb_id);
hlist_del_rcu(&fa->fa_list);
fib_release_info(fa->fa_info);
alias_free_mem_rcu(fa);
@@ -2057,11 +2058,12 @@ static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter)
static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
struct trie *t)
{
- struct key_vector *n, *pn = t->kv;
+ struct key_vector *n, *pn;
if (!t)
return NULL;
+ pn = t->kv;
n = rcu_dereference(pn->tnode[0]);
if (!n)
return NULL;
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve_core.c
index 8986e63f3bda..311a4ba6950a 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve_core.c
@@ -60,11 +60,6 @@ struct geneve_net {
static int geneve_net_id;
-static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
-{
- return (struct genevehdr *)(udp_hdr(skb) + 1);
-}
-
static struct geneve_sock *geneve_find_sock(struct net *net,
sa_family_t family, __be16 port)
{
@@ -435,7 +430,7 @@ static int __init geneve_init_module(void)
if (rc)
return rc;
- pr_info("Geneve driver\n");
+ pr_info("Geneve core logic\n");
return 0;
}
@@ -449,5 +444,4 @@ module_exit(geneve_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jesse Gross <jesse@nicira.com>");
-MODULE_DESCRIPTION("Driver for GENEVE encapsulated traffic");
-MODULE_ALIAS_RTNL_LINK("geneve");
+MODULE_DESCRIPTION("Driver library for GENEVE encapsulated traffic");
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index a3a697f5ffba..651cdf648ec4 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1339,6 +1339,168 @@ out:
}
EXPORT_SYMBOL(ip_mc_inc_group);
+static int ip_mc_check_iphdr(struct sk_buff *skb)
+{
+ const struct iphdr *iph;
+ unsigned int len;
+ unsigned int offset = skb_network_offset(skb) + sizeof(*iph);
+
+ if (!pskb_may_pull(skb, offset))
+ return -EINVAL;
+
+ iph = ip_hdr(skb);
+
+ if (iph->version != 4 || ip_hdrlen(skb) < sizeof(*iph))
+ return -EINVAL;
+
+ offset += ip_hdrlen(skb) - sizeof(*iph);
+
+ if (!pskb_may_pull(skb, offset))
+ return -EINVAL;
+
+ iph = ip_hdr(skb);
+
+ if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+ return -EINVAL;
+
+ len = skb_network_offset(skb) + ntohs(iph->tot_len);
+ if (skb->len < len || len < offset)
+ return -EINVAL;
+
+ skb_set_transport_header(skb, offset);
+
+ return 0;
+}
+
+static int ip_mc_check_igmp_reportv3(struct sk_buff *skb)
+{
+ unsigned int len = skb_transport_offset(skb);
+
+ len += sizeof(struct igmpv3_report);
+
+ return pskb_may_pull(skb, len) ? 0 : -EINVAL;
+}
+
+static int ip_mc_check_igmp_query(struct sk_buff *skb)
+{
+ unsigned int len = skb_transport_offset(skb);
+
+ len += sizeof(struct igmphdr);
+ if (skb->len < len)
+ return -EINVAL;
+
+ /* IGMPv{1,2}? */
+ if (skb->len != len) {
+ /* or IGMPv3? */
+ len += sizeof(struct igmpv3_query) - sizeof(struct igmphdr);
+ if (skb->len < len || !pskb_may_pull(skb, len))
+ return -EINVAL;
+ }
+
+ /* RFC 2236 and RFC 3376 (IGMPv2 and IGMPv3) require general queries
+ * to be sent to the link-local all-systems multicast address
+ * (224.0.0.1)
+ */
+ if (!igmp_hdr(skb)->group &&
+ ip_hdr(skb)->daddr != htonl(INADDR_ALLHOSTS_GROUP))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ip_mc_check_igmp_msg(struct sk_buff *skb)
+{
+ switch (igmp_hdr(skb)->type) {
+ case IGMP_HOST_LEAVE_MESSAGE:
+ case IGMP_HOST_MEMBERSHIP_REPORT:
+ case IGMPV2_HOST_MEMBERSHIP_REPORT:
+ /* fall through */
+ return 0;
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
+ return ip_mc_check_igmp_reportv3(skb);
+ case IGMP_HOST_MEMBERSHIP_QUERY:
+ return ip_mc_check_igmp_query(skb);
+ default:
+ return -ENOMSG;
+ }
+}
+
+static inline __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
+{
+ return skb_checksum_simple_validate(skb);
+}
+
+static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+{
+ struct sk_buff *skb_chk;
+ unsigned int transport_len;
+ unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
+ int ret;
+
+ transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
+
+ skb_get(skb);
+ skb_chk = skb_checksum_trimmed(skb, transport_len,
+ ip_mc_validate_checksum);
+ if (!skb_chk)
+ return -EINVAL;
+
+ if (!pskb_may_pull(skb_chk, len)) {
+ kfree_skb(skb_chk);
+ return -EINVAL;
+ }
+
+ ret = ip_mc_check_igmp_msg(skb_chk);
+ if (ret) {
+ kfree_skb(skb_chk);
+ return ret;
+ }
+
+ if (skb_trimmed)
+ *skb_trimmed = skb_chk;
+ else
+ kfree_skb(skb_chk);
+
+ return 0;
+}
+
+/**
+ * ip_mc_check_igmp - checks whether this is a sane IGMP packet
+ * @skb: the skb to validate
+ * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
+ *
+ * Checks whether an IPv4 packet is a valid IGMP packet. If so, it sets
+ * the skb's network and transport headers accordingly and returns zero.
+ *
+ * -EINVAL: A broken packet was detected, i.e. it violates some internet
+ * standard.
+ * -ENOMSG: IP header validation succeeded but it is not an IGMP packet.
+ * -ENOMEM: A memory allocation failure happened.
+ *
+ * Optionally, the caller may provide an skb pointer via skb_trimmed (or
+ * pass NULL): after an IGMP packet is parsed successfully, it will point to
+ * an skb which has its tail aligned to the IP packet end. This might
+ * either be the originally provided skb or a trimmed, cloned version if
+ * the skb frame had data beyond the IP packet. A cloned skb allows us
+ * to leave the original skb and its full frame unchanged (which might be
+ * desirable for layer 2 frame jugglers).
+ *
+ * The caller needs to release a reference count from any returned skb_trimmed.
+ */
+int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+{
+ int ret = ip_mc_check_iphdr(skb);
+
+ if (ret < 0)
+ return ret;
+
+ if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
+ return -ENOMSG;
+
+ return __ip_mc_check_igmp(skb, skb_trimmed);
+}
+EXPORT_SYMBOL(ip_mc_check_igmp);
+
/*
* Resend IGMP JOIN report; used by netdev notifier.
*/
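A hypothetical caller in the spirit of the bridge multicast snooping code this helper targets; per the kernel-doc above, any returned skb_trimmed carries a reference the caller must drop:

    #include <linux/igmp.h>
    #include <linux/skbuff.h>

    static int snoop_igmp(struct sk_buff *skb)
    {
        struct sk_buff *trimmed = NULL;
        int err = ip_mc_check_igmp(skb, &trimmed);

        if (err == -ENOMSG)
            return 0;		/* IPv4, but not IGMP */
        if (err < 0)
            return err;		/* malformed packet */

        /* ... inspect igmp_hdr(trimmed) ... */

        kfree_skb(trimmed);	/* drop the helper's reference */
        return 0;
    }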
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 8976ca423a07..60021d0d9326 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -99,6 +99,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
struct net *net = sock_net(sk);
int smallest_size = -1, smallest_rover;
kuid_t uid = sock_i_uid(sk);
+ int attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
local_bh_disable();
if (!snum) {
@@ -106,6 +107,14 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
again:
inet_get_local_port_range(net, &low, &high);
+ if (attempt_half) {
+ int half = low + ((high - low) >> 1);
+
+ if (attempt_half == 1)
+ high = half;
+ else
+ low = half;
+ }
remaining = (high - low) + 1;
smallest_rover = rover = prandom_u32() % remaining + low;
@@ -127,11 +136,6 @@ again:
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
- if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
- !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
- snum = smallest_rover;
- goto tb_found;
- }
}
if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
snum = rover;
@@ -159,6 +163,11 @@ again:
snum = smallest_rover;
goto have_snum;
}
+ if (attempt_half == 1) {
+ /* OK we now try the upper half of the range */
+ attempt_half = 2;
+ goto again;
+ }
goto fail;
}
/* OK, here is the one we will use. HEAD is
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index c6fb80bd5826..5f9b063bbe8a 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
+#include <linux/vmalloc.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
@@ -90,10 +91,6 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
const unsigned short snum)
{
- struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
-
- atomic_inc(&hashinfo->bsockets);
-
inet_sk(sk)->inet_num = snum;
sk_add_bind_node(sk, &tb->owners);
tb->num_owners++;
@@ -111,8 +108,6 @@ static void __inet_put_port(struct sock *sk)
struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
struct inet_bind_bucket *tb;
- atomic_dec(&hashinfo->bsockets);
-
spin_lock(&head->lock);
tb = inet_csk(sk)->icsk_bind_hash;
__sk_del_bind_node(sk);
@@ -399,9 +394,10 @@ not_unique:
return -EADDRNOTAVAIL;
}
-static inline u32 inet_sk_port_offset(const struct sock *sk)
+static u32 inet_sk_port_offset(const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
+
return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
inet->inet_daddr,
inet->inet_dport);
@@ -507,8 +503,14 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
+ /* By starting with an even offset, we tend to leave about
+ * 50% of ports for other uses, like bind(0).
+ */
+ offset &= ~1;
+
local_bh_disable();
- for (i = 1; i <= remaining; i++) {
+ for (i = 0; i < remaining; i++) {
port = low + (i + offset) % remaining;
if (inet_is_local_reserved_port(net, port))
continue;
@@ -552,7 +554,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
return -EADDRNOTAVAIL;
ok:
- hint += i;
+ hint += (i + 2) & ~1;
/* Head lock still held and bh's disabled */
inet_bind_hash(sk, tb, port);
@@ -599,7 +601,11 @@ out:
int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk)
{
- return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
+ u32 port_offset = 0;
+
+ if (!inet_sk(sk)->inet_num)
+ port_offset = inet_sk_port_offset(sk);
+ return __inet_hash_connect(death_row, sk, port_offset,
__inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);
@@ -608,7 +614,6 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
{
int i;
- atomic_set(&h->bsockets, 0);
for (i = 0; i < INET_LHTABLE_SIZE; i++) {
spin_lock_init(&h->listening_hash[i].lock);
INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
@@ -616,3 +621,33 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
+
+int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
+{
+ unsigned int i, nblocks = 1;
+
+ if (sizeof(spinlock_t) != 0) {
+ /* allocate 2 cache lines or at least one spinlock per cpu */
+ nblocks = max_t(unsigned int,
+ 2 * L1_CACHE_BYTES / sizeof(spinlock_t),
+ 1);
+ nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
+
+ /* no more locks than number of hash buckets */
+ nblocks = min(nblocks, hashinfo->ehash_mask + 1);
+
+ hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!hashinfo->ehash_locks)
+ hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t));
+
+ if (!hashinfo->ehash_locks)
+ return -ENOMEM;
+
+ for (i = 0; i < nblocks; i++)
+ spin_lock_init(&hashinfo->ehash_locks[i]);
+ }
+ hashinfo->ehash_locks_mask = nblocks - 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);
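The sizing rule above can be checked by hand; a runnable sketch of the arithmetic, with example values for the cache line, lock size and CPU count:

    #include <stdio.h>

    static unsigned int roundup_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned int cache_line = 64, lock_size = 4, cpus = 8;
        unsigned int buckets = 65536;
        unsigned int n = 2 * cache_line / lock_size;	/* 32 per cpu */

        n = roundup_pow_of_two(n * cpus);		/* 256 */
        if (n > buckets)				/* cap at buckets */
            n = buckets;
        printf("%u ehash locks\n", n);
        return 0;
    }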
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 00ec8d5d7e7e..2ffbd16b79e0 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -170,7 +170,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
-void tw_timer_handler(unsigned long data)
+static void tw_timer_handler(unsigned long data)
{
struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3674484946a5..2d3aa408fbdc 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -39,17 +39,21 @@
#include <net/route.h>
#include <net/xfrm.h>
-static bool ip_may_fragment(const struct sk_buff *skb)
-{
- return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
- skb->ignore_df;
-}
-
static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
if (skb->len <= mtu)
return false;
+ if (unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0))
+ return false;
+
+ /* original fragment exceeds mtu and DF is set */
+ if (unlikely(IPCB(skb)->frag_max_size > mtu))
+ return true;
+
+ if (skb->ignore_df)
+ return false;
+
if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
return false;
@@ -114,7 +118,7 @@ int ip_forward(struct sk_buff *skb)
IPCB(skb)->flags |= IPSKB_FORWARDED;
mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
- if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) {
+ if (ip_exceeds_mtu(skb, mtu)) {
IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index cc1da6d9cb35..a50dc6d408d1 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -75,6 +75,7 @@ struct ipq {
__be16 id;
u8 protocol;
u8 ecn; /* RFC3168 support */
+ u16 max_df_size; /* largest frag with DF set seen */
int iif;
unsigned int rid;
struct inet_peer *peer;
@@ -173,6 +174,15 @@ static void ipq_kill(struct ipq *ipq)
inet_frag_kill(&ipq->q, &ip4_frags);
}
+static bool frag_expire_skip_icmp(u32 user)
+{
+ return user == IP_DEFRAG_AF_PACKET ||
+ ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
+ __IP_DEFRAG_CONNTRACK_IN_END) ||
+ ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
+ __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
+}
+
/*
* Oops, a fragment queue timed out. Kill it and send an ICMP reply.
*/
@@ -217,10 +227,8 @@ static void ip_expire(unsigned long arg)
/* Only an end host needs to send an ICMP
* "Fragment Reassembly Timeout" message, per RFC792.
*/
- if (qp->user == IP_DEFRAG_AF_PACKET ||
- ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
- (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
- (skb_rtable(head)->rt_type != RTN_LOCAL)))
+ if (frag_expire_skip_icmp(qp->user) &&
+ (skb_rtable(head)->rt_type != RTN_LOCAL))
goto out_rcu_unlock;
/* Send an ICMP "Fragment Reassembly Timeout" message. */
@@ -319,6 +327,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
struct sk_buff *prev, *next;
struct net_device *dev;
+ unsigned int fragsize;
int flags, offset;
int ihl, end;
int err = -ENOENT;
@@ -474,9 +483,14 @@ found:
if (offset == 0)
qp->q.flags |= INET_FRAG_FIRST_IN;
+ fragsize = skb->len + ihl;
+
+ if (fragsize > qp->q.max_size)
+ qp->q.max_size = fragsize;
+
if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
- skb->len + ihl > qp->q.max_size)
- qp->q.max_size = skb->len + ihl;
+ fragsize > qp->max_df_size)
+ qp->max_df_size = fragsize;
if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
qp->q.meat == qp->q.len) {
@@ -606,13 +620,27 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
head->next = NULL;
head->dev = dev;
head->tstamp = qp->q.stamp;
- IPCB(head)->frag_max_size = qp->q.max_size;
+ IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
iph = ip_hdr(head);
- /* max_size != 0 implies at least one fragment had IP_DF set */
- iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
iph->tot_len = htons(len);
iph->tos |= ecn;
+
+ /* When we set IP_DF on a refragmented skb we must also force a
+ * call to ip_fragment to avoid forwarding a DF-skb of size s while
+ * the original sender only sent fragments of size f (where f < s).
+ *
+ * We only set DF/IPSKB_FRAG_PMTU if such a DF fragment was the largest
+ * fragment seen, to avoid sending tiny DF fragments in case the skb
+ * was built from one very small DF fragment and one large non-DF
+ * fragment.
+ */
+ if (qp->max_df_size == qp->q.max_size) {
+ IPCB(head)->flags |= IPSKB_FRAG_PMTU;
+ iph->frag_off = htons(IP_DF);
+ } else {
+ iph->frag_off = 0;
+ }
+
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
qp->q.fragments = NULL;
qp->q.fragments_tail = NULL;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c65b93a7b711..55f3c2e94357 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -83,6 +83,10 @@
int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);
+static int ip_fragment(struct sock *sk, struct sk_buff *skb,
+ unsigned int mtu,
+ int (*output)(struct sock *, struct sk_buff *));
+
/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
@@ -91,7 +95,7 @@ void ip_send_check(struct iphdr *iph)
}
EXPORT_SYMBOL(ip_send_check);
-int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
+static int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
@@ -168,7 +172,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
-static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
+static int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = (struct rtable *)dst;
@@ -216,7 +220,8 @@ static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
return -EINVAL;
}
-static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
+static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb,
+ unsigned int mtu)
{
netdev_features_t features;
struct sk_buff *segs;
@@ -224,7 +229,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
/* common case: locally created skb or seglen is <= mtu */
if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
- skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
+ skb_gso_network_seglen(skb) <= mtu)
return ip_finish_output2(sk, skb);
/* Slowpath - GSO segment length is exceeding the dst MTU.
@@ -248,7 +253,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
int err;
segs->next = NULL;
- err = ip_fragment(sk, segs, ip_finish_output2);
+ err = ip_fragment(sk, segs, mtu, ip_finish_output2);
if (err && ret == 0)
ret = err;
@@ -260,6 +265,8 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
{
+ unsigned int mtu;
+
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
/* Policy lookup after SNAT yielded a new policy */
if (skb_dst(skb)->xfrm) {
@@ -267,11 +274,12 @@ static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
return dst_output_sk(sk, skb);
}
#endif
+ mtu = ip_skb_dst_mtu(skb);
if (skb_is_gso(skb))
- return ip_finish_output_gso(sk, skb);
+ return ip_finish_output_gso(sk, skb, mtu);
- if (skb->len > ip_skb_dst_mtu(skb))
- return ip_fragment(sk, skb, ip_finish_output2);
+ if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
+ return ip_fragment(sk, skb, mtu, ip_finish_output2);
return ip_finish_output2(sk, skb);
}
@@ -478,6 +486,31 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
skb_copy_secmark(to, from);
}
+static int ip_fragment(struct sock *sk, struct sk_buff *skb,
+ unsigned int mtu,
+ int (*output)(struct sock *, struct sk_buff *))
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ if ((iph->frag_off & htons(IP_DF)) == 0)
+ return ip_do_fragment(sk, skb, output);
+
+ if (unlikely(!skb->ignore_df ||
+ (IPCB(skb)->frag_max_size &&
+ IPCB(skb)->frag_max_size > mtu))) {
+ struct rtable *rt = skb_rtable(skb);
+ struct net_device *dev = rt->dst.dev;
+
+ IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+
+ return ip_do_fragment(sk, skb, output);
+}
+
/*
* This IP datagram is too large to be sent in one piece. Break it up into
* smaller pieces (each of size equal to IP header plus
@@ -485,8 +518,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
* single device frame, and queue such a frame for sending.
*/
-int ip_fragment(struct sock *sk, struct sk_buff *skb,
- int (*output)(struct sock *, struct sk_buff *))
+int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct sock *, struct sk_buff *))
{
struct iphdr *iph;
int ptr;
@@ -507,15 +540,8 @@ int ip_fragment(struct sock *sk, struct sk_buff *skb,
iph = ip_hdr(skb);
mtu = ip_skb_dst_mtu(skb);
- if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
- (IPCB(skb)->frag_max_size &&
- IPCB(skb)->frag_max_size > mtu))) {
- IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
- htonl(mtu));
- kfree_skb(skb);
- return -EMSGSIZE;
- }
+ if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
+ mtu = IPCB(skb)->frag_max_size;
/*
* Setup starting values.
@@ -711,6 +737,9 @@ slow_path:
iph = ip_hdr(skb2);
iph->frag_off = htons((offset >> 3));
+ if (IPCB(skb)->flags & IPSKB_FRAG_PMTU)
+ iph->frag_off |= htons(IP_DF);
+
/* ANK: dirty, but effective trick. Upgrade options only if
* the segment to be fragmented was THE FIRST (otherwise,
* options are already fixed) and make it ONCE
@@ -751,7 +780,7 @@ fail:
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
}
-EXPORT_SYMBOL(ip_fragment);
+EXPORT_SYMBOL(ip_do_fragment);
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
@@ -1217,11 +1246,9 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
}
while (size > 0) {
- int i;
-
- if (skb_is_gso(skb))
+ if (skb_is_gso(skb)) {
len = size;
- else {
+ } else {
/* Check if the remaining data fits into current packet. */
len = mtu - skb->len;
@@ -1273,15 +1300,10 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
continue;
}
- i = skb_shinfo(skb)->nr_frags;
if (len > size)
len = size;
- if (skb_can_coalesce(skb, i, page, offset)) {
- skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
- } else if (i < MAX_SKB_FRAGS) {
- get_page(page);
- skb_fill_page_desc(skb, i, page, offset, len);
- } else {
+
+ if (skb_append_pagefrags(skb, page, offset, len)) {
err = -EMSGSIZE;
goto error;
}
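Reading the new static ip_fragment() wrapper together with the clamp in ip_do_fragment(): a forwarded skb reassembled from 1400-byte fragments carries frag_max_size = 1400, so even with an egress MTU of 1500, ip_do_fragment() lowers its working mtu to 1400 and the pieces it emits are never larger than what the original sender demonstrably got through its own path. Conversely, a DF skb whose ignore_df is not set, or whose frag_max_size exceeds the egress MTU, is answered with ICMP_DEST_UNREACH/ICMP_FRAG_NEEDED advertising that MTU and -EMSGSIZE, the same behavior the block removed from ip_do_fragment() used to provide.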
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 7cfb0893f263..04ae2992a5cd 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -582,6 +582,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
case IP_TRANSPARENT:
case IP_MINTTL:
case IP_NODEFRAG:
+ case IP_BIND_ADDRESS_NO_PORT:
case IP_UNICAST_IF:
case IP_MULTICAST_TTL:
case IP_MULTICAST_ALL:
@@ -732,6 +733,9 @@ static int do_ip_setsockopt(struct sock *sk, int level,
}
inet->nodefrag = val ? 1 : 0;
break;
+ case IP_BIND_ADDRESS_NO_PORT:
+ inet->bind_address_no_port = val ? 1 : 0;
+ break;
case IP_MTU_DISCOVER:
if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
goto e_inval;
@@ -1324,6 +1328,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_NODEFRAG:
val = inet->nodefrag;
break;
+ case IP_BIND_ADDRESS_NO_PORT:
+ val = inet->bind_address_no_port;
+ break;
case IP_MTU_DISCOVER:
val = inet->pmtudisc;
break;
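For context on how the new option is consumed from userspace, a minimal sketch (untested, error handling elided; only the option name and its int on/off semantics come from the hunks above):

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Bind to a fixed source address without reserving a port yet. */
    static int bound_socket_no_port(struct in_addr src)
    {
            struct sockaddr_in sa;
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            int one = 1;

            setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));

            memset(&sa, 0, sizeof(sa));
            sa.sin_family = AF_INET;
            sa.sin_port = 0;        /* no ephemeral port chosen here... */
            sa.sin_addr = src;
            bind(fd, (struct sockaddr *)&sa, sizeof(sa));
            /* ...the port is picked at connect() time instead, so many such
             * sockets can share src as long as their destinations differ. */
            return fd;
    }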
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index ce63ab21b6cd..6a51a71a6c67 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -98,7 +98,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
return -ENOMEM;
eh = (struct ethhdr *)skb->data;
- if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
+ if (likely(eth_proto_is_802_3(eh->h_proto)))
skb->protocol = eh->h_proto;
else
skb->protocol = htons(ETH_P_802_2);
@@ -165,6 +165,8 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
{
int i;
+ netdev_stats_to_stats64(tot, &dev->stats);
+
for_each_possible_cpu(i) {
const struct pcpu_sw_netstats *tstats =
per_cpu_ptr(dev->tstats, i);
@@ -185,22 +187,6 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
tot->tx_bytes += tx_bytes;
}
- tot->multicast = dev->stats.multicast;
-
- tot->rx_crc_errors = dev->stats.rx_crc_errors;
- tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
- tot->rx_length_errors = dev->stats.rx_length_errors;
- tot->rx_frame_errors = dev->stats.rx_frame_errors;
- tot->rx_errors = dev->stats.rx_errors;
-
- tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
- tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
- tot->tx_dropped = dev->stats.tx_dropped;
- tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
- tot->tx_errors = dev->stats.tx_errors;
-
- tot->collisions = dev->stats.collisions;
-
return tot;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ff96396ebec5..254238daf58b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -251,7 +251,8 @@ ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EINVAL;
}
- p.i_key = p.o_key = p.i_flags = p.o_flags = 0;
+ p.i_key = p.o_key = 0;
+ p.i_flags = p.o_flags = 0;
if (p.iph.ttl)
p.iph.frag_off |= htons(IP_DF);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2d0e265fef6e..e7abf5145edc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1444,7 +1444,6 @@ static int
compat_find_calc_match(struct xt_entry_match *m,
const char *name,
const struct ipt_ip *ip,
- unsigned int hookmask,
int *size)
{
struct xt_match *match;
@@ -1513,8 +1512,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
- ret = compat_find_calc_match(ematch, name,
- &e->ip, e->comefrom, &off);
+ ret = compat_find_calc_match(ematch, name, &e->ip, &off);
if (ret != 0)
goto release_matches;
++j;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 771ab3d01ad3..45cb16a6a4a3 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -367,6 +367,11 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
struct clusterip_config *config;
int ret;
+ if (par->nft_compat) {
+ pr_err("cannot use CLUSTERIP target from nftables compat\n");
+ return -EOPNOTSUPP;
+ }
+
if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP &&
cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT &&
cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) {
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index e9e67793055f..fe8cc183411e 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -18,7 +18,7 @@
#include <net/netfilter/nf_conntrack_synproxy.h>
static struct iphdr *
-synproxy_build_ip(struct sk_buff *skb, u32 saddr, u32 daddr)
+synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
struct iphdr *iph;
@@ -220,7 +220,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
nth->ack_seq = th->ack_seq;
tcp_flag_word(nth) = TCP_FLAG_ACK;
nth->doff = tcp_hdr_size / 4;
- nth->window = ntohs(htons(th->window) >> opts->wscale);
+ nth->window = htons(ntohs(th->window) >> opts->wscale);
nth->check = 0;
nth->urg_ptr = 0;
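A note on the window fix: since the 16-bit byte swap is an involution, ntohs(htons(x) >> s) and htons(ntohs(x) >> s) actually produce the same bits on both big- and little-endian hosts. What the new form corrects is the direction of each conversion with respect to the __be16 annotations sparse checks: th->window is wire (big-endian) data, so it must pass through ntohs() before being shifted in host order and through htons() on the way back out.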
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index e1f3b911dd1e..da5d483e236a 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -298,6 +298,8 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPACKSkippedFinWait2", LINUX_MIB_TCPACKSKIPPEDFINWAIT2),
SNMP_MIB_ITEM("TCPACKSkippedTimeWait", LINUX_MIB_TCPACKSKIPPEDTIMEWAIT),
SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
+ SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE),
+ SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f45f2a12f37b..f6055984c307 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -457,12 +457,9 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
}
#define IP_IDENTS_SZ 2048u
-struct ip_ident_bucket {
- atomic_t id;
- u32 stamp32;
-};
-static struct ip_ident_bucket *ip_idents __read_mostly;
+static atomic_t *ip_idents __read_mostly;
+static u32 *ip_tstamps __read_mostly;
/* In order to protect privacy, we add a perturbation to identifiers
* if one generator is seldom used. This makes it hard for an attacker
@@ -470,15 +467,16 @@ static struct ip_ident_bucket *ip_idents __read_mostly;
*/
u32 ip_idents_reserve(u32 hash, int segs)
{
- struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
- u32 old = ACCESS_ONCE(bucket->stamp32);
+ u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
+ atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
+ u32 old = ACCESS_ONCE(*p_tstamp);
u32 now = (u32)jiffies;
u32 delta = 0;
- if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
+ if (old != now && cmpxchg(p_tstamp, old, now) == old)
delta = prandom_u32_max(now - old);
- return atomic_add_return(segs + delta, &bucket->id) - segs;
+ return atomic_add_return(segs + delta, p_id) - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
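A worked example of the perturbation, assuming HZ=1000 so jiffies tick once per millisecond: if a given hash bucket was last used one second ago, now - old = 1000 and prandom_u32_max() mixes 0..999 extra increments into that generator on top of the segs actually consumed, while a continuously busy bucket (old == now) gets delta = 0 and behaves like a plain atomic counter. Splitting the old ip_ident_bucket into the two flat arrays ip_idents[] and ip_tstamps[] leaves this logic unchanged.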
@@ -2097,7 +2095,8 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
goto out;
}
if (ipv4_is_local_multicast(fl4->daddr) ||
- ipv4_is_lbcast(fl4->daddr)) {
+ ipv4_is_lbcast(fl4->daddr) ||
+ fl4->flowi4_proto == IPPROTO_IGMP) {
if (!fl4->saddr)
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_LINK);
@@ -2742,6 +2741,10 @@ int __init ip_rt_init(void)
prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
+ ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
+ if (!ip_tstamps)
+ panic("IP: failed to allocate ip_tstamps\n");
+
for_each_possible_cpu(cpu) {
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index df849e5a10f1..d70b1f603692 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -219,9 +219,9 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
}
EXPORT_SYMBOL_GPL(__cookie_v4_check);
-static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req,
- struct dst_entry *dst)
+struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct sock *child;
@@ -235,7 +235,7 @@ static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
}
return child;
}
-
+EXPORT_SYMBOL(tcp_get_cookie_sock);
/*
* when syncookies are in effect and tcp timestamps are enabled we stored
@@ -391,7 +391,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);
- ret = get_cookie_sock(sk, skb, req, &rt->dst);
+ ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
/* ip_queue_xmit() depends on our flow being setup
* Normal sockets get it right from inet_csk_route_child_sock()
*/
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index c3852a7ff3c7..433231ccfb17 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,11 +41,19 @@ static int tcp_syn_retries_min = 1;
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int min_sndbuf = SOCK_MIN_SNDBUF;
+static int min_rcvbuf = SOCK_MIN_RCVBUF;
/* Update system visible IP port range */
static void set_local_port_range(struct net *net, int range[2])
{
+ bool same_parity = !((range[0] ^ range[1]) & 1);
+
write_seqlock(&net->ipv4.ip_local_ports.lock);
+ if (same_parity && !net->ipv4.ip_local_ports.warned) {
+ net->ipv4.ip_local_ports.warned = true;
+ pr_err_ratelimited("ip_local_port_range: prefer different parity for start/end values.\n");
+ }
net->ipv4.ip_local_ports.range[0] = range[0];
net->ipv4.ip_local_ports.range[1] = range[1];
write_sequnlock(&net->ipv4.ip_local_ports.lock);
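The parity test itself is cheap bit math: for the usual "32768 60999" range, 32768 ^ 60999 has bit 0 set (even versus odd endpoint), so same_parity is false and nothing is printed, whereas a range like "10000 20000" (both even) trips the one-time warning. The preference exists because the port allocator in this same series biases connect() toward one parity, leaving the other for bind(); endpoints of equal parity skew that split.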
@@ -522,7 +530,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_tcp_wmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &one,
+ .extra1 = &min_sndbuf,
},
{
.procname = "tcp_notsent_lowat",
@@ -537,7 +545,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_tcp_rmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &one,
+ .extra1 = &min_rcvbuf,
},
{
.procname = "tcp_app_win",
@@ -702,7 +710,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .extra1 = &one,
.extra2 = &gso_max_segs,
},
{
@@ -750,7 +758,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_udp_rmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &one
+ .extra1 = &min_rcvbuf,
},
{
.procname = "udp_wmem_min",
@@ -758,7 +766,7 @@ static struct ctl_table ipv4_table[] = {
.maxlen = sizeof(sysctl_udp_wmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &one
+ .extra1 = &min_sndbuf,
},
{ }
};
@@ -821,6 +829,13 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "tcp_ecn_fallback",
+ .data = &init_net.ipv4.sysctl_tcp_ecn_fallback,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "ip_local_port_range",
.maxlen = sizeof(init_net.ipv4.ip_local_ports.range),
.data = &init_net.ipv4.ip_local_ports.range,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f1377f2a0472..65f791f74845 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -695,8 +695,9 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
struct tcp_splice_state *tss = rd_desc->arg.data;
int ret;
- ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
- tss->flags);
+ ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
+ min(rd_desc->count, len), tss->flags,
+ skb_socket_splice);
if (ret > 0)
rd_desc->count -= ret;
return ret;
@@ -809,16 +810,28 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
}
EXPORT_SYMBOL(tcp_splice_read);
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule)
{
struct sk_buff *skb;
/* The TCP header must be at least 32-bit aligned. */
size = ALIGN(size, 4);
+ if (unlikely(tcp_under_memory_pressure(sk)))
+ sk_mem_reclaim_partial(sk);
+
skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
- if (skb) {
- if (sk_wmem_schedule(sk, skb->truesize)) {
+ if (likely(skb)) {
+ bool mem_scheduled;
+
+ if (force_schedule) {
+ mem_scheduled = true;
+ sk_forced_mem_schedule(sk, skb->truesize);
+ } else {
+ mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
+ }
+ if (likely(mem_scheduled)) {
skb_reserve(skb, sk->sk_prot->max_header);
/*
* Make sure that we have exactly size bytes
@@ -908,7 +921,8 @@ new_segment:
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+ skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
+ skb_queue_empty(&sk->sk_write_queue));
if (!skb)
goto wait_for_memory;
@@ -987,6 +1001,9 @@ do_error:
if (copied)
goto out;
out_err:
+ /* make sure we wake any epoll edge-trigger waiter */
+ if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
+ sk->sk_write_space(sk);
return sk_stream_error(sk, flags, err);
}
@@ -1144,7 +1161,8 @@ new_segment:
skb = sk_stream_alloc_skb(sk,
select_size(sk, sg),
- sk->sk_allocation);
+ sk->sk_allocation,
+ skb_queue_empty(&sk->sk_write_queue));
if (!skb)
goto wait_for_memory;
@@ -1275,6 +1293,9 @@ do_error:
goto out;
out_err:
err = sk_stream_error(sk, flags, err);
+ /* make sure we wake any epoll edge-trigger waiter */
+ if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
+ sk->sk_write_space(sk);
release_sock(sk);
return err;
}
@@ -2483,6 +2504,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
icsk->icsk_syn_retries = val;
break;
+ case TCP_SAVE_SYN:
+ if (val < 0 || val > 1)
+ err = -EINVAL;
+ else
+ tp->save_syn = val;
+ break;
+
case TCP_LINGER2:
if (val < 0)
tp->linger2 = -1;
@@ -2672,6 +2700,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_bytes_acked = tp->bytes_acked;
info->tcpi_bytes_received = tp->bytes_received;
} while (u64_stats_fetch_retry_irq(&tp->syncp, start));
+ info->tcpi_segs_out = tp->segs_out;
+ info->tcpi_segs_in = tp->segs_in;
}
EXPORT_SYMBOL_GPL(tcp_get_info);
@@ -2821,6 +2851,42 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
case TCP_NOTSENT_LOWAT:
val = tp->notsent_lowat;
break;
+ case TCP_SAVE_SYN:
+ val = tp->save_syn;
+ break;
+ case TCP_SAVED_SYN: {
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ lock_sock(sk);
+ if (tp->saved_syn) {
+ if (len < tp->saved_syn[0]) {
+ if (put_user(tp->saved_syn[0], optlen)) {
+ release_sock(sk);
+ return -EFAULT;
+ }
+ release_sock(sk);
+ return -EINVAL;
+ }
+ len = tp->saved_syn[0];
+ if (put_user(len, optlen)) {
+ release_sock(sk);
+ return -EFAULT;
+ }
+ if (copy_to_user(optval, tp->saved_syn + 1, len)) {
+ release_sock(sk);
+ return -EFAULT;
+ }
+ tcp_saved_syn_free(tp);
+ release_sock(sk);
+ } else {
+ release_sock(sk);
+ len = 0;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ }
+ return 0;
+ }
default:
return -ENOPROTOOPT;
}
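The intended use of the pair, sketched from the setsockopt/getsockopt hunks above (untested; the fd variable names are illustrative): enable TCP_SAVE_SYN on the listening socket before the SYN arrives, then fetch the headers once on the accepted socket.

    int on = 1;
    unsigned char hdrs[256];   /* headers of the peer's SYN land here */
    socklen_t len = sizeof(hdrs);

    setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN, &on, sizeof(on));
    /* ... listen(), then accept() yielding conn_fd ... */
    if (getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, hdrs, &len) == 0) {
            /* hdrs[0..len) holds the network and TCP headers of the SYN.
             * The read is one-shot: tcp_saved_syn_free() above drops the
             * kernel copy, so a second call succeeds with len == 0. */
    }
    /* If the buffer is smaller than the saved headers, the call fails
     * with EINVAL and writes the required length back through optlen. */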
@@ -3025,11 +3091,12 @@ __setup("thash_entries=", set_thash_entries);
static void __init tcp_init_mem(void)
{
- unsigned long limit = nr_free_buffer_pages() / 8;
+ unsigned long limit = nr_free_buffer_pages() / 16;
+
limit = max(limit, 128UL);
- sysctl_tcp_mem[0] = limit / 4 * 3;
- sysctl_tcp_mem[1] = limit;
- sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
+ sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */
+ sysctl_tcp_mem[1] = limit; /* 6.25 % */
+ sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */
}
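The new percentages check out against the halved divisor: limit = nr_free_buffer_pages()/16 makes tcp_mem[1] = 1/16 = 6.25% of low-memory pages, tcp_mem[0] = 3/4 * 6.25% = 4.6875%, and tcp_mem[2] = 2 * tcp_mem[0] = 9.375%. The previous /8 divisor allowed 9.375/12.5/18.75% respectively, so the change halves TCP's global memory budget.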
void __init tcp_init(void)
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
new file mode 100644
index 000000000000..a52ce2db53f2
--- /dev/null
+++ b/net/ipv4/tcp_cdg.c
@@ -0,0 +1,433 @@
+/*
+ * CAIA Delay-Gradient (CDG) congestion control
+ *
+ * This implementation is based on the paper:
+ * D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
+ * delay gradients." In IFIP Networking, pages 328-341. Springer, 2011.
+ *
+ * Scavenger traffic (Less-than-Best-Effort) should disable coexistence
+ * heuristics using parameters use_shadow=0 and use_ineff=0.
+ *
+ * Parameters window, backoff_beta, and backoff_factor are crucial for
+ * throughput and delay. Future work is needed to determine better defaults,
+ * and to provide guidelines for use in different environments/contexts.
+ *
+ * Except for window, knobs are configured via /sys/module/tcp_cdg/parameters/.
+ * Parameter window is only configurable when loading tcp_cdg as a module.
+ *
+ * Notable differences from paper/FreeBSD:
+ * o Using Hybrid Slow start and Proportional Rate Reduction.
+ * o Add toggle for shadow window mechanism. Suggested by David Hayes.
+ * o Add toggle for non-congestion loss tolerance.
+ * o Scaling parameter G is changed to a backoff factor;
+ * conversion is given by: backoff_factor = 1000/(G * window).
+ * o Limit shadow window to 2 * cwnd, or to cwnd when application limited.
+ * o More accurate e^-x.
+ */
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/module.h>
+#include <net/tcp.h>
+
+#define HYSTART_ACK_TRAIN 1
+#define HYSTART_DELAY 2
+
+static int window __read_mostly = 8;
+static unsigned int backoff_beta __read_mostly = 0.7071 * 1024; /* sqrt 0.5 */
+static unsigned int backoff_factor __read_mostly = 42;
+static unsigned int hystart_detect __read_mostly = 3;
+static unsigned int use_ineff __read_mostly = 5;
+static bool use_shadow __read_mostly = true;
+static bool use_tolerance __read_mostly;
+
+module_param(window, int, 0444);
+MODULE_PARM_DESC(window, "gradient window size (power of two <= 256)");
+module_param(backoff_beta, uint, 0644);
+MODULE_PARM_DESC(backoff_beta, "backoff beta (0-1024)");
+module_param(backoff_factor, uint, 0644);
+MODULE_PARM_DESC(backoff_factor, "backoff probability scale factor");
+module_param(hystart_detect, uint, 0644);
+MODULE_PARM_DESC(hystart_detect, "use Hybrid Slow start "
+ "(0: disabled, 1: ACK train, 2: delay threshold, 3: both)");
+module_param(use_ineff, uint, 0644);
+MODULE_PARM_DESC(use_ineff, "use ineffectual backoff detection (threshold)");
+module_param(use_shadow, bool, 0644);
+MODULE_PARM_DESC(use_shadow, "use shadow window heuristic");
+module_param(use_tolerance, bool, 0644);
+MODULE_PARM_DESC(use_tolerance, "use loss tolerance heuristic");
+
+struct minmax {
+ union {
+ struct {
+ s32 min;
+ s32 max;
+ };
+ u64 v64;
+ };
+};
+
+enum cdg_state {
+ CDG_UNKNOWN = 0,
+ CDG_NONFULL = 1,
+ CDG_FULL = 2,
+ CDG_BACKOFF = 3,
+};
+
+struct cdg {
+ struct minmax rtt;
+ struct minmax rtt_prev;
+ struct minmax *gradients;
+ struct minmax gsum;
+ bool gfilled;
+ u8 tail;
+ u8 state;
+ u8 delack;
+ u32 rtt_seq;
+ u32 undo_cwnd;
+ u32 shadow_wnd;
+ u16 backoff_cnt;
+ u16 sample_cnt;
+ s32 delay_min;
+ u32 last_ack;
+ u32 round_start;
+};
+
+/**
+ * nexp_u32 - negative base-e exponential
+ * @ux: x in units of micro
+ *
+ * Returns exp(ux * -1e-6) * U32_MAX.
+ */
+static u32 __pure nexp_u32(u32 ux)
+{
+ static const u16 v[] = {
+ /* exp(-x)*65536-1 for x = 0, 0.000256, 0.000512, ... */
+ 65535,
+ 65518, 65501, 65468, 65401, 65267, 65001, 64470, 63422,
+ 61378, 57484, 50423, 38795, 22965, 8047, 987, 14,
+ };
+ u32 msb = ux >> 8;
+ u32 res;
+ int i;
+
+ /* Cut off when ux >= 2^24 (actual result is <= 222/U32_MAX). */
+ if (msb > U16_MAX)
+ return 0;
+
+ /* Scale first eight bits linearly: */
+ res = U32_MAX - (ux & 0xff) * (U32_MAX / 1000000);
+
+ /* Obtain e^(x + y + ...) by computing e^x * e^y * ...: */
+ for (i = 1; msb; i++, msb >>= 1) {
+ u32 y = v[i & -(msb & 1)] + U32_C(1);
+
+ res = ((u64)res * y) >> 16;
+ }
+
+ return res;
+}
+
+/* Based on the HyStart algorithm (by Ha et al.) that is implemented in
+ * tcp_cubic. Differences/experimental changes:
+ * o Using Hayes' delayed ACK filter.
+ * o Using a usec clock for the ACK train.
+ * o Reset ACK train when application limited.
+ * o Invoked at any cwnd (i.e. also when cwnd < 16).
+ * o Invoked only when cwnd < ssthresh (i.e. not when cwnd == ssthresh).
+ */
+static void tcp_cdg_hystart_update(struct sock *sk)
+{
+ struct cdg *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ ca->delay_min = min_not_zero(ca->delay_min, ca->rtt.min);
+ if (ca->delay_min == 0)
+ return;
+
+ if (hystart_detect & HYSTART_ACK_TRAIN) {
+ u32 now_us = local_clock() / NSEC_PER_USEC;
+
+ if (ca->last_ack == 0 || !tcp_is_cwnd_limited(sk)) {
+ ca->last_ack = now_us;
+ ca->round_start = now_us;
+ } else if (before(now_us, ca->last_ack + 3000)) {
+ u32 base_owd = max(ca->delay_min / 2U, 125U);
+
+ ca->last_ack = now_us;
+ if (after(now_us, ca->round_start + base_owd)) {
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPHYSTARTTRAINDETECT);
+ NET_ADD_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPHYSTARTTRAINCWND,
+ tp->snd_cwnd);
+ tp->snd_ssthresh = tp->snd_cwnd;
+ return;
+ }
+ }
+ }
+
+ if (hystart_detect & HYSTART_DELAY) {
+ if (ca->sample_cnt < 8) {
+ ca->sample_cnt++;
+ } else {
+ s32 thresh = max(ca->delay_min + ca->delay_min / 8U,
+ 125U);
+
+ if (ca->rtt.min > thresh) {
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPHYSTARTDELAYDETECT);
+ NET_ADD_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPHYSTARTDELAYCWND,
+ tp->snd_cwnd);
+ tp->snd_ssthresh = tp->snd_cwnd;
+ }
+ }
+ }
+}
+
+static s32 tcp_cdg_grad(struct cdg *ca)
+{
+ s32 gmin = ca->rtt.min - ca->rtt_prev.min;
+ s32 gmax = ca->rtt.max - ca->rtt_prev.max;
+ s32 grad;
+
+ if (ca->gradients) {
+ ca->gsum.min += gmin - ca->gradients[ca->tail].min;
+ ca->gsum.max += gmax - ca->gradients[ca->tail].max;
+ ca->gradients[ca->tail].min = gmin;
+ ca->gradients[ca->tail].max = gmax;
+ ca->tail = (ca->tail + 1) & (window - 1);
+ gmin = ca->gsum.min;
+ gmax = ca->gsum.max;
+ }
+
+ /* We keep sums to ignore gradients during cwnd reductions;
+ * the paper's smoothed gradients otherwise simplify to:
+ * (rtt_latest - rtt_oldest) / window.
+ *
+ * We also drop division by window here.
+ */
+ grad = gmin > 0 ? gmin : gmax;
+
+ /* Extrapolate missing values in gradient window: */
+ if (!ca->gfilled) {
+ if (!ca->gradients && window > 1)
+ grad *= window; /* Memory allocation failed. */
+ else if (ca->tail == 0)
+ ca->gfilled = true;
+ else
+ grad = (grad * window) / (int)ca->tail;
+ }
+
+ /* Backoff was effectual: */
+ if (gmin <= -32 || gmax <= -32)
+ ca->backoff_cnt = 0;
+
+ if (use_tolerance) {
+ /* Reduce small variations to zero: */
+ gmin = DIV_ROUND_CLOSEST(gmin, 64);
+ gmax = DIV_ROUND_CLOSEST(gmax, 64);
+
+ if (gmin > 0 && gmax <= 0)
+ ca->state = CDG_FULL;
+ else if ((gmin > 0 && gmax > 0) || gmax < 0)
+ ca->state = CDG_NONFULL;
+ }
+ return grad;
+}
+
+static bool tcp_cdg_backoff(struct sock *sk, u32 grad)
+{
+ struct cdg *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (prandom_u32() <= nexp_u32(grad * backoff_factor))
+ return false;
+
+ if (use_ineff) {
+ ca->backoff_cnt++;
+ if (ca->backoff_cnt > use_ineff)
+ return false;
+ }
+
+ ca->shadow_wnd = max(ca->shadow_wnd, tp->snd_cwnd);
+ ca->state = CDG_BACKOFF;
+ tcp_enter_cwr(sk);
+ return true;
+}
+
+/* Not called in CWR or Recovery state. */
+static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+{
+ struct cdg *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 prior_snd_cwnd;
+ u32 incr;
+
+ if (tp->snd_cwnd < tp->snd_ssthresh && hystart_detect)
+ tcp_cdg_hystart_update(sk);
+
+ if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
+ s32 grad = 0;
+
+ if (ca->rtt_prev.v64)
+ grad = tcp_cdg_grad(ca);
+ ca->rtt_seq = tp->snd_nxt;
+ ca->rtt_prev = ca->rtt;
+ ca->rtt.v64 = 0;
+ ca->last_ack = 0;
+ ca->sample_cnt = 0;
+
+ if (grad > 0 && tcp_cdg_backoff(sk, grad))
+ return;
+ }
+
+ if (!tcp_is_cwnd_limited(sk)) {
+ ca->shadow_wnd = min(ca->shadow_wnd, tp->snd_cwnd);
+ return;
+ }
+
+ prior_snd_cwnd = tp->snd_cwnd;
+ tcp_reno_cong_avoid(sk, ack, acked);
+
+ incr = tp->snd_cwnd - prior_snd_cwnd;
+ ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr);
+}
+
+static void tcp_cdg_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
+{
+ struct cdg *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (rtt_us <= 0)
+ return;
+
+ /* A heuristic for filtering delayed ACKs, adapted from:
+ * D.A. Hayes. "Timing enhancements to the FreeBSD kernel to support
+ * delay and rate based TCP mechanisms." TR 100219A. CAIA, 2010.
+ */
+ if (tp->sacked_out == 0) {
+ if (num_acked == 1 && ca->delack) {
+ /* A delayed ACK is only used for the minimum if it is
+ * provably lower than an existing non-zero minimum.
+ */
+ ca->rtt.min = min(ca->rtt.min, rtt_us);
+ ca->delack--;
+ return;
+ } else if (num_acked > 1 && ca->delack < 5) {
+ ca->delack++;
+ }
+ }
+
+ ca->rtt.min = min_not_zero(ca->rtt.min, rtt_us);
+ ca->rtt.max = max(ca->rtt.max, rtt_us);
+}
+
+static u32 tcp_cdg_ssthresh(struct sock *sk)
+{
+ struct cdg *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ ca->undo_cwnd = tp->snd_cwnd;
+
+ if (ca->state == CDG_BACKOFF)
+ return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10);
+
+ if (ca->state == CDG_NONFULL && use_tolerance)
+ return tp->snd_cwnd;
+
+ ca->shadow_wnd = min(ca->shadow_wnd >> 1, tp->snd_cwnd);
+ if (use_shadow)
+ return max3(2U, ca->shadow_wnd, tp->snd_cwnd >> 1);
+ return max(2U, tp->snd_cwnd >> 1);
+}
+
+static u32 tcp_cdg_undo_cwnd(struct sock *sk)
+{
+ struct cdg *ca = inet_csk_ca(sk);
+
+ return max(tcp_sk(sk)->snd_cwnd, ca->undo_cwnd);
+}
+
+static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
+{
+ struct cdg *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct minmax *gradients;
+
+ switch (ev) {
+ case CA_EVENT_CWND_RESTART:
+ gradients = ca->gradients;
+ if (gradients)
+ memset(gradients, 0, window * sizeof(gradients[0]));
+ memset(ca, 0, sizeof(*ca));
+
+ ca->gradients = gradients;
+ ca->rtt_seq = tp->snd_nxt;
+ ca->shadow_wnd = tp->snd_cwnd;
+ break;
+ case CA_EVENT_COMPLETE_CWR:
+ ca->state = CDG_UNKNOWN;
+ ca->rtt_seq = tp->snd_nxt;
+ ca->rtt_prev = ca->rtt;
+ ca->rtt.v64 = 0;
+ break;
+ default:
+ break;
+ }
+}
+
+static void tcp_cdg_init(struct sock *sk)
+{
+ struct cdg *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* We silently fall back to window = 1 if allocation fails. */
+ if (window > 1)
+ ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
+ GFP_NOWAIT | __GFP_NOWARN);
+ ca->rtt_seq = tp->snd_nxt;
+ ca->shadow_wnd = tp->snd_cwnd;
+}
+
+static void tcp_cdg_release(struct sock *sk)
+{
+ struct cdg *ca = inet_csk_ca(sk);
+
+ kfree(ca->gradients);
+}
+
+struct tcp_congestion_ops tcp_cdg __read_mostly = {
+ .cong_avoid = tcp_cdg_cong_avoid,
+ .cwnd_event = tcp_cdg_cwnd_event,
+ .pkts_acked = tcp_cdg_acked,
+ .undo_cwnd = tcp_cdg_undo_cwnd,
+ .ssthresh = tcp_cdg_ssthresh,
+ .release = tcp_cdg_release,
+ .init = tcp_cdg_init,
+ .owner = THIS_MODULE,
+ .name = "cdg",
+};
+
+static int __init tcp_cdg_register(void)
+{
+ if (backoff_beta > 1024 || window < 1 || window > 256)
+ return -ERANGE;
+ if (!is_power_of_2(window))
+ return -EINVAL;
+
+ BUILD_BUG_ON(sizeof(struct cdg) > ICSK_CA_PRIV_SIZE);
+ tcp_register_congestion_control(&tcp_cdg);
+ return 0;
+}
+
+static void __exit tcp_cdg_unregister(void)
+{
+ tcp_unregister_congestion_control(&tcp_cdg);
+}
+
+module_init(tcp_cdg_register);
+module_exit(tcp_cdg_unregister);
+MODULE_AUTHOR("Kenneth Klette Jonassen");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TCP CDG");
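For a feel of the backoff math above: nexp_u32(ux) approximates exp(-ux * 1e-6) scaled to U32_MAX, and prandom_u32() is uniform over [0, U32_MAX], so tcp_cdg_backoff() fires with probability roughly 1 - exp(-grad * backoff_factor * 1e-6) per evaluation. With the default backoff_factor = 42, a windowed gradient sum of grad = 1000 (usec) gives 1 - e^-0.042, about 4.1%, while grad = 10000 gives 1 - e^-0.42, about 34%. As a sanity check on the fixed-point helper, nexp_u32(0) is ~U32_MAX and nexp_u32(1000000) is ~U32_MAX/e, about 1.58e9. Choosing a different window at module load changes the gradient scale, which is why tcp_cdg_register() insists on a power of two between 1 and 256.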
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 4c41c1287197..7092a61c4dc8 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -204,20 +204,26 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
/* Expired RTT */
if (!before(tp->snd_una, ca->next_seq)) {
- /* For avoiding denominator == 1. */
- if (ca->acked_bytes_total == 0)
- ca->acked_bytes_total = 1;
+ u64 bytes_ecn = ca->acked_bytes_ecn;
+ u32 alpha = ca->dctcp_alpha;
/* alpha = (1 - g) * alpha + g * F */
- ca->dctcp_alpha = ca->dctcp_alpha -
- (ca->dctcp_alpha >> dctcp_shift_g) +
- (ca->acked_bytes_ecn << (10U - dctcp_shift_g)) /
- ca->acked_bytes_total;
- if (ca->dctcp_alpha > DCTCP_MAX_ALPHA)
- /* Clamp dctcp_alpha to max. */
- ca->dctcp_alpha = DCTCP_MAX_ALPHA;
+ alpha -= alpha >> dctcp_shift_g;
+ if (bytes_ecn) {
+ /* If dctcp_shift_g == 1, a 32bit value would overflow
+ * after 8 Mbytes.
+ */
+ bytes_ecn <<= (10 - dctcp_shift_g);
+ do_div(bytes_ecn, max(1U, ca->acked_bytes_total));
+ alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA);
+ }
+ /* dctcp_alpha can be read from dctcp_get_info() without
+ * synchronization, so we ask the compiler not to use dctcp_alpha
+ * as a temporary variable in prior operations.
+ */
+ WRITE_ONCE(ca->dctcp_alpha, alpha);
dctcp_reset(tp, ca);
}
}
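In the fixed-point convention used here (DCTCP_MAX_ALPHA = 1024 representing 1.0, g = 2^-dctcp_shift_g), the rewritten update still computes alpha <- (1 - g) * alpha + g * F with F = acked_bytes_ecn / acked_bytes_total. A worked step with dctcp_shift_g = 4, alpha = 512 (0.5) and F = 0.25: the ECN term is (bytes_ecn << 6) / total = 0.25 * 64 = 16, so alpha' = 512 - 32 + 16 = 496, drifting toward F * 1024 = 256. Promoting bytes_ecn to u64 matters because, as the new comment notes, with dctcp_shift_g = 1 the << 9 would overflow 32 bits once more than 8 Mbytes are ACKed in one observation window.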
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c9ab964189a0..684f095d196e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -359,7 +359,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
/* Check #1 */
if (tp->rcv_ssthresh < tp->window_clamp &&
(int)tp->rcv_ssthresh < tcp_space(sk) &&
- !sk_under_memory_pressure(sk)) {
+ !tcp_under_memory_pressure(sk)) {
int incr;
/* Check #2. Increase window, if skb with such overhead
@@ -446,7 +446,7 @@ static void tcp_clamp_window(struct sock *sk)
if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
- !sk_under_memory_pressure(sk) &&
+ !tcp_under_memory_pressure(sk) &&
sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
sysctl_tcp_rmem[2]);
@@ -1130,7 +1130,12 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
struct tcp_sacktag_state {
int reord;
int fack_count;
- long rtt_us; /* RTT measured by SACKing never-retransmitted data */
+ /* Timestamps for earliest and latest never-retransmitted segment
+ * that was SACKed. RTO needs the earliest RTT to stay conservative,
+ * but congestion control should still get an accurate delay signal.
+ */
+ struct skb_mstamp first_sackt;
+ struct skb_mstamp last_sackt;
int flag;
};
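A concrete case for the two timestamps: suppose segments transmitted at t = 0 ms and t = 10 ms are both newly SACKed by an ACK processed at t = 50 ms. first_sackt then yields a 50 ms sample, which tcp_clean_rtx_queue() below feeds to the RTO machinery (measuring from the oldest data keeps the retransmit timer conservative), while last_sackt yields 40 ms for the ca_rtt_us handed to the congestion control's pkts_acked() hook, the freshest and therefore most accurate delay signal. The old single rtt_us field could only supply the former.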
@@ -1233,14 +1238,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
state->reord);
if (!after(end_seq, tp->high_seq))
state->flag |= FLAG_ORIG_SACK_ACKED;
- /* Pick the earliest sequence sacked for RTT */
- if (state->rtt_us < 0) {
- struct skb_mstamp now;
-
- skb_mstamp_get(&now);
- state->rtt_us = skb_mstamp_us_delta(&now,
- xmit_time);
- }
+ if (state->first_sackt.v64 == 0)
+ state->first_sackt = *xmit_time;
+ state->last_sackt = *xmit_time;
}
if (sacked & TCPCB_LOST) {
@@ -1316,16 +1316,12 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
* code can come after this skb later on it's better to keep
* setting gso_size to something.
*/
- if (!skb_shinfo(prev)->gso_size) {
- skb_shinfo(prev)->gso_size = mss;
- skb_shinfo(prev)->gso_type = sk->sk_gso_type;
- }
+ if (!TCP_SKB_CB(prev)->tcp_gso_size)
+ TCP_SKB_CB(prev)->tcp_gso_size = mss;
/* CHECKME: To clear or not to clear? Mimics normal skb currently */
- if (tcp_skb_pcount(skb) <= 1) {
- skb_shinfo(skb)->gso_size = 0;
- skb_shinfo(skb)->gso_type = 0;
- }
+ if (tcp_skb_pcount(skb) <= 1)
+ TCP_SKB_CB(skb)->tcp_gso_size = 0;
/* Difference in this won't matter, both ACKed by the same cumul. ACK */
TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1634,7 +1630,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
static int
tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
- u32 prior_snd_una, long *sack_rtt_us)
+ u32 prior_snd_una, struct tcp_sacktag_state *state)
{
struct tcp_sock *tp = tcp_sk(sk);
const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1642,7 +1638,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
struct tcp_sack_block sp[TCP_NUM_SACKS];
struct tcp_sack_block *cache;
- struct tcp_sacktag_state state;
struct sk_buff *skb;
int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
int used_sacks;
@@ -1650,9 +1645,8 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
int i, j;
int first_sack_index;
- state.flag = 0;
- state.reord = tp->packets_out;
- state.rtt_us = -1L;
+ state->flag = 0;
+ state->reord = tp->packets_out;
if (!tp->sacked_out) {
if (WARN_ON(tp->fackets_out))
@@ -1663,7 +1657,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
num_sacks, prior_snd_una);
if (found_dup_sack)
- state.flag |= FLAG_DSACKING_ACK;
+ state->flag |= FLAG_DSACKING_ACK;
/* Eliminate too old ACKs, but take into
* account more or less fresh ones, they can
@@ -1728,7 +1722,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
}
skb = tcp_write_queue_head(sk);
- state.fack_count = 0;
+ state->fack_count = 0;
i = 0;
if (!tp->sacked_out) {
@@ -1762,10 +1756,10 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
/* Head todo? */
if (before(start_seq, cache->start_seq)) {
- skb = tcp_sacktag_skip(skb, sk, &state,
+ skb = tcp_sacktag_skip(skb, sk, state,
start_seq);
skb = tcp_sacktag_walk(skb, sk, next_dup,
- &state,
+ state,
start_seq,
cache->start_seq,
dup_sack);
@@ -1776,7 +1770,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
goto advance_sp;
skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
- &state,
+ state,
cache->end_seq);
/* ...tail remains todo... */
@@ -1785,12 +1779,12 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
skb = tcp_highest_sack(sk);
if (!skb)
break;
- state.fack_count = tp->fackets_out;
+ state->fack_count = tp->fackets_out;
cache++;
goto walk;
}
- skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
+ skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq);
/* Check overlap against next cached too (past this one already) */
cache++;
continue;
@@ -1800,12 +1794,12 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
skb = tcp_highest_sack(sk);
if (!skb)
break;
- state.fack_count = tp->fackets_out;
+ state->fack_count = tp->fackets_out;
}
- skb = tcp_sacktag_skip(skb, sk, &state, start_seq);
+ skb = tcp_sacktag_skip(skb, sk, state, start_seq);
walk:
- skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
+ skb = tcp_sacktag_walk(skb, sk, next_dup, state,
start_seq, end_seq, dup_sack);
advance_sp:
@@ -1820,9 +1814,9 @@ advance_sp:
for (j = 0; j < used_sacks; j++)
tp->recv_sack_cache[i++] = sp[j];
- if ((state.reord < tp->fackets_out) &&
+ if ((state->reord < tp->fackets_out) &&
((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
- tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
+ tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
tcp_mark_lost_retrans(sk);
tcp_verify_left_out(tp);
@@ -1834,8 +1828,7 @@ out:
WARN_ON((int)tp->retrans_out < 0);
WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
- *sack_rtt_us = state.rtt_us;
- return state.flag;
+ return state->flag;
}
/* Limits sacked_out so that sum with lost_out isn't ever larger than
@@ -2255,7 +2248,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
(oldcnt >= packets))
break;
- mss = skb_shinfo(skb)->gso_size;
+ mss = tcp_skb_mss(skb);
err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
mss, GFP_ATOMIC);
if (err < 0)
@@ -2555,6 +2548,7 @@ void tcp_enter_cwr(struct sock *sk)
tcp_set_ca_state(sk, TCP_CA_CWR);
}
}
+EXPORT_SYMBOL(tcp_enter_cwr);
static void tcp_try_keep_open(struct sock *sk)
{
@@ -3055,7 +3049,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
* arrived at the other end.
*/
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
- u32 prior_snd_una, long sack_rtt_us)
+ u32 prior_snd_una,
+ struct tcp_sacktag_state *sack)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct skb_mstamp first_ackt, last_ackt, now;
@@ -3063,8 +3058,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
u32 prior_sacked = tp->sacked_out;
u32 reord = tp->packets_out;
bool fully_acked = true;
- long ca_seq_rtt_us = -1L;
+ long sack_rtt_us = -1L;
long seq_rtt_us = -1L;
+ long ca_rtt_us = -1L;
struct sk_buff *skb;
u32 pkts_acked = 0;
bool rtt_update;
@@ -3153,15 +3149,16 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
skb_mstamp_get(&now);
if (likely(first_ackt.v64)) {
seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
- ca_seq_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+ ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+ }
+ if (sack->first_sackt.v64) {
+ sack_rtt_us = skb_mstamp_us_delta(&now, &sack->first_sackt);
+ ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
}
rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us);
if (flag & FLAG_ACKED) {
- const struct tcp_congestion_ops *ca_ops
- = inet_csk(sk)->icsk_ca_ops;
-
tcp_rearm_rto(sk);
if (unlikely(icsk->icsk_mtup.probe_size &&
!after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
@@ -3184,11 +3181,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->fackets_out -= min(pkts_acked, tp->fackets_out);
- if (ca_ops->pkts_acked) {
- long rtt_us = min_t(ulong, ca_seq_rtt_us, sack_rtt_us);
- ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
- }
-
} else if (skb && rtt_update && sack_rtt_us >= 0 &&
sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
/* Do not re-arm RTO if the sack RTT is measured from data sent
@@ -3198,6 +3190,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tcp_rearm_rto(sk);
}
+ if (icsk->icsk_ca_ops->pkts_acked)
+ icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us);
+
#if FASTRETRANS_DEBUG > 0
WARN_ON((int)tp->sacked_out < 0);
WARN_ON((int)tp->lost_out < 0);
@@ -3238,7 +3233,7 @@ static void tcp_ack_probe(struct sock *sk)
* This function is not for random using!
*/
} else {
- unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+ unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
when, TCP_RTO_MAX);
@@ -3466,6 +3461,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_sacktag_state sack_state;
u32 prior_snd_una = tp->snd_una;
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
@@ -3474,7 +3470,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
int prior_packets = tp->packets_out;
const int prior_unsacked = tp->packets_out - tp->sacked_out;
int acked = 0; /* Number of packets newly acked */
- long sack_rtt_us = -1L;
+
+ sack_state.first_sackt.v64 = 0;
/* We very likely will need to access write queue head. */
prefetchw(sk->sk_write_queue.next);
@@ -3538,7 +3535,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (TCP_SKB_CB(skb)->sacked)
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
- &sack_rtt_us);
+ &sack_state);
if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
flag |= FLAG_ECE;
@@ -3563,7 +3560,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
acked = tp->packets_out;
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una,
- sack_rtt_us);
+ &sack_state);
acked -= tp->packets_out;
/* Advance cwnd if state allows */
@@ -3615,7 +3612,7 @@ old_ack:
*/
if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
- &sack_rtt_us);
+ &sack_state);
tcp_fastretrans_alert(sk, acked, prior_unsacked,
is_dupack, flag);
}
@@ -4514,10 +4511,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (eaten <= 0) {
queue_and_out:
- if (eaten < 0 &&
- tcp_try_rmem_schedule(sk, skb, skb->truesize))
- goto drop;
-
+ if (eaten < 0) {
+ if (skb_queue_len(&sk->sk_receive_queue) == 0)
+ sk_forced_mem_schedule(sk, skb->truesize);
+ else if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+ goto drop;
+ }
eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
}
tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
@@ -4788,7 +4787,7 @@ static int tcp_prune_queue(struct sock *sk)
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
tcp_clamp_window(sk);
- else if (sk_under_memory_pressure(sk))
+ else if (tcp_under_memory_pressure(sk))
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
tcp_collapse_ofo_queue(sk);
@@ -4832,7 +4831,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
return false;
/* If we are under global TCP memory pressure, do not expand. */
- if (sk_under_memory_pressure(sk))
+ if (tcp_under_memory_pressure(sk))
return false;
/* If we are under soft global TCP memory pressure, do not expand. */
@@ -6067,6 +6066,23 @@ static bool tcp_syn_flood_action(struct sock *sk,
return want_cookie;
}
+static void tcp_reqsk_record_syn(const struct sock *sk,
+ struct request_sock *req,
+ const struct sk_buff *skb)
+{
+ if (tcp_sk(sk)->save_syn) {
+ u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
+ u32 *copy;
+
+ copy = kmalloc(len + sizeof(u32), GFP_ATOMIC);
+ if (copy) {
+ copy[0] = len;
+ memcpy(&copy[1], skb_network_header(skb), len);
+ req->saved_syn = copy;
+ }
+ }
+}
+
int tcp_conn_request(struct request_sock_ops *rsk_ops,
const struct tcp_request_sock_ops *af_ops,
struct sock *sk, struct sk_buff *skb)
@@ -6199,6 +6215,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
tcp_rsk(req)->tfo_listener = false;
af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
}
+ tcp_reqsk_record_syn(sk, req, skb);
return 0;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index fc1c658ec6c1..d7d4c2b79cf2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1400,7 +1400,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
- if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
+ if (tcp_checksum_complete(skb))
goto csum_err;
if (sk->sk_state == TCP_LISTEN) {
@@ -1626,6 +1626,7 @@ process:
skb->dev = NULL;
bh_lock_sock_nested(sk);
+ tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
ret = 0;
if (!sock_owned_by_user(sk)) {
if (!tcp_prequeue(sk, skb))
@@ -1646,7 +1647,7 @@ no_tcp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
- if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
+ if (tcp_checksum_complete(skb)) {
csum_error:
TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
@@ -1670,10 +1671,6 @@ do_time_wait:
goto discard_it;
}
- if (skb->len < (th->doff << 2)) {
- inet_twsk_put(inet_twsk(sk));
- goto bad_packet;
- }
if (tcp_checksum_complete(skb)) {
inet_twsk_put(inet_twsk(sk));
goto csum_error;
@@ -1802,6 +1799,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
/* If socket is aborted during connect operation */
tcp_free_fastopen_req(tp);
+ tcp_saved_syn_free(tp);
sk_sockets_allocated_dec(sk);
sock_release_memcg(sk);
@@ -2410,12 +2408,15 @@ static int __net_init tcp_sk_init(struct net *net)
goto fail;
*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
}
+
net->ipv4.sysctl_tcp_ecn = 2;
+ net->ipv4.sysctl_tcp_ecn_fallback = 1;
+
net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
- return 0;
+ return 0;
fail:
tcp_sk_exit(net);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 17e7339ee5ca..4bc00cb79e60 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -451,6 +451,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->rcv_wup = newtp->copied_seq =
newtp->rcv_nxt = treq->rcv_isn + 1;
+ newtp->segs_in = 0;
newtp->snd_sml = newtp->snd_una =
newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
@@ -539,6 +540,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
+ newtp->saved_syn = req->saved_syn;
+ req->saved_syn = NULL;
+
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
}
return newsk;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 3f7c2fca5431..9864a2dbadce 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -77,7 +77,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
oldlen = (u16)~skb->len;
__skb_pull(skb, thlen);
- mss = tcp_skb_mss(skb);
+ mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
@@ -242,7 +242,7 @@ found:
flush |= *(u32 *)((u8 *)th + i) ^
*(u32 *)((u8 *)th2 + i);
- mss = tcp_skb_mss(p);
+ mss = skb_shinfo(p)->gso_size;
flush |= (len - 1) >= mss;
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a369e8a70b2c..b1c218df2c85 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -50,8 +50,8 @@ int sysctl_tcp_retrans_collapse __read_mostly = 1;
*/
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
-/* Default TSQ limit of two TSO segments */
-int sysctl_tcp_limit_output_bytes __read_mostly = 131072;
+/* Default TSQ limit of four TSO segments */
+int sysctl_tcp_limit_output_bytes __read_mostly = 262144;
/* This limits the percentage of the congestion window which we
* will allow a single TSO frame to consume. Building TSO frames
@@ -350,6 +350,15 @@ static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
}
}
+static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
+{
+ if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
+ /* tp->ecn_flags are cleared at a later point in time when
+ * the SYN ACK is ultimately received.
+ */
+ TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
+}
+
static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th,
struct sock *sk)
@@ -393,8 +402,6 @@ static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
*/
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
-
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
@@ -402,8 +409,6 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
TCP_SKB_CB(skb)->sacked = 0;
tcp_skb_pcount_set(skb, 1);
- shinfo->gso_size = 0;
- shinfo->gso_type = 0;
TCP_SKB_CB(skb)->seq = seq;
if (flags & (TCPHDR_SYN | TCPHDR_FIN))
@@ -994,6 +999,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
}
tcp_options_write((__be32 *)(th + 1), tp, &opts);
+ skb_shinfo(skb)->gso_type = sk->sk_gso_type;
if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
tcp_ecn_send(sk, skb, tcp_header_size);
@@ -1018,8 +1024,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
tcp_skb_pcount(skb));
- /* OK, its time to fill skb_shinfo(skb)->gso_segs */
+ tp->segs_out += tcp_skb_pcount(skb);
+ /* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
+ skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
/* Our usage of tstamp should remain private */
skb->tstamp.tv64 = 0;
@@ -1056,25 +1064,17 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
}
/* Initialize TSO segments for a packet. */
-static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
- unsigned int mss_now)
+static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
-
- /* Make sure we own this skb before messing gso_size/gso_segs */
- WARN_ON_ONCE(skb_cloned(skb));
-
if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
/* Avoid the costly divide in the normal
* non-TSO case.
*/
tcp_skb_pcount_set(skb, 1);
- shinfo->gso_size = 0;
- shinfo->gso_type = 0;
+ TCP_SKB_CB(skb)->tcp_gso_size = 0;
} else {
tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
- shinfo->gso_size = mss_now;
- shinfo->gso_type = sk->sk_gso_type;
+ TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
}
}
@@ -1163,7 +1163,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
return -ENOMEM;
/* Get a new skb... force flag on. */
- buff = sk_stream_alloc_skb(sk, nsize, gfp);
+ buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
if (!buff)
return -ENOMEM; /* We'll just try again later. */
@@ -1206,8 +1206,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
old_factor = tcp_skb_pcount(skb);
/* Fix up tso_factor for both original and new SKB. */
- tcp_set_skb_tso_segs(sk, skb, mss_now);
- tcp_set_skb_tso_segs(sk, buff, mss_now);
+ tcp_set_skb_tso_segs(skb, mss_now);
+ tcp_set_skb_tso_segs(buff, mss_now);
/* If this packet has been sent out already, we must
* adjust the various packet counters.
@@ -1287,7 +1287,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
/* Any change of skb->len requires recalculation of tso factor. */
if (tcp_skb_pcount(skb) > 1)
- tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
+ tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
return 0;
}
@@ -1619,13 +1619,12 @@ static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
* This must be invoked the first time we consider transmitting
* SKB onto the wire.
*/
-static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
- unsigned int mss_now)
+static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
{
int tso_segs = tcp_skb_pcount(skb);
if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
- tcp_set_skb_tso_segs(sk, skb, mss_now);
+ tcp_set_skb_tso_segs(skb, mss_now);
tso_segs = tcp_skb_pcount(skb);
}
return tso_segs;
@@ -1680,7 +1679,7 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
const struct tcp_sock *tp = tcp_sk(sk);
unsigned int cwnd_quota;
- tcp_init_tso_segs(sk, skb, cur_mss);
+ tcp_init_tso_segs(skb, cur_mss);
if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
return 0;
@@ -1722,7 +1721,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
if (skb->len != skb->data_len)
return tcp_fragment(sk, skb, len, mss_now, gfp);
- buff = sk_stream_alloc_skb(sk, 0, gfp);
+ buff = sk_stream_alloc_skb(sk, 0, gfp, true);
if (unlikely(!buff))
return -ENOMEM;
@@ -1749,8 +1748,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
tcp_fragment_tstamp(skb, buff);
/* Fix up tso_factor for both original and new SKB. */
- tcp_set_skb_tso_segs(sk, skb, mss_now);
- tcp_set_skb_tso_segs(sk, buff, mss_now);
+ tcp_set_skb_tso_segs(skb, mss_now);
+ tcp_set_skb_tso_segs(buff, mss_now);
/* Link BUFF into the send queue. */
__skb_header_release(buff);
@@ -1941,7 +1940,7 @@ static int tcp_mtu_probe(struct sock *sk)
}
/* We're allowed to probe. Build it now. */
- nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC);
+ nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
if (!nskb)
return -1;
sk->sk_wmem_queued += nskb->truesize;
@@ -1984,7 +1983,7 @@ static int tcp_mtu_probe(struct sock *sk)
skb->len, 0);
} else {
__pskb_trim_head(skb, copy);
- tcp_set_skb_tso_segs(sk, skb, mss_now);
+ tcp_set_skb_tso_segs(skb, mss_now);
}
TCP_SKB_CB(skb)->seq += copy;
}
@@ -1994,7 +1993,7 @@ static int tcp_mtu_probe(struct sock *sk)
if (len >= probe_size)
break;
}
- tcp_init_tso_segs(sk, nskb, nskb->len);
+ tcp_init_tso_segs(nskb, nskb->len);
/* We're ready to send. If this fails, the probe will
* be resegmented into mss-sized pieces by tcp_write_xmit().
@@ -2056,7 +2055,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
- tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
+ tso_segs = tcp_init_tso_segs(skb, mss_now);
BUG_ON(!tso_segs);
if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
@@ -2078,7 +2077,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
break;
- if (tso_segs == 1 || !max_segs) {
+ if (tso_segs == 1) {
if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
(tcp_skb_is_last(sk, skb) ?
nonagle : TCP_NAGLE_PUSH))))
@@ -2091,7 +2090,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
limit = mss_now;
- if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp))
+ if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
min_t(unsigned int,
cwnd_quota,
@@ -2392,7 +2391,7 @@ u32 __tcp_select_window(struct sock *sk)
if (free_space < (full_space >> 1)) {
icsk->icsk_ack.quick = 0;
- if (sk_under_memory_pressure(sk))
+ if (tcp_under_memory_pressure(sk))
tp->rcv_ssthresh = min(tp->rcv_ssthresh,
4U * tp->advmss);
@@ -2610,11 +2609,15 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (unlikely(oldpcount > 1)) {
if (skb_unclone(skb, GFP_ATOMIC))
return -ENOMEM;
- tcp_init_tso_segs(sk, skb, cur_mss);
+ tcp_init_tso_segs(skb, cur_mss);
tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
}
}
+ /* RFC3168, section 6.1.1.1. ECN fallback */
+ if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
+ tcp_ecn_clear_syn(sk, skb);
+
tcp_retrans_try_collapse(sk, skb, cur_mss);
/* Make a copy, if the first transmission SKB clone we made
@@ -2816,8 +2819,10 @@ begin_fwd:
* connection tear down and (memory) recovery.
* Otherwise tcp_send_fin() could be tempted to either delay FIN
* or even be forced to close the flow without any FIN.
+ * In general, we want to allow one skb per socket to avoid hangs
+ * with edge-triggered epoll().
*/
-static void sk_forced_wmem_schedule(struct sock *sk, int size)
+void sk_forced_mem_schedule(struct sock *sk, int size)
{
int amt, status;
@@ -2841,7 +2846,7 @@ void tcp_send_fin(struct sock *sk)
* Note: in the latter case, the FIN packet will be sent after a timeout,
* as TCP stack thinks it has already been transmitted.
*/
- if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+ if (tskb && (tcp_send_head(sk) || tcp_under_memory_pressure(sk))) {
coalesce:
TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
TCP_SKB_CB(tskb)->end_seq++;
@@ -2864,7 +2869,7 @@ coalesce:
return;
}
skb_reserve(skb, MAX_TCP_HEADER);
- sk_forced_wmem_schedule(sk, skb->truesize);
+ sk_forced_mem_schedule(sk, skb->truesize);
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
tcp_init_nondata_skb(skb, tp->write_seq,
TCPHDR_ACK | TCPHDR_FIN);
@@ -3175,7 +3180,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
/* limit to order-0 allocations */
space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
- syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation);
+ syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
if (!syn_data)
goto fallback;
syn_data->ip_summed = CHECKSUM_PARTIAL;
@@ -3241,7 +3246,7 @@ int tcp_connect(struct sock *sk)
return 0;
}
- buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+ buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
if (unlikely(!buff))
return -ENOBUFS;
@@ -3382,7 +3387,7 @@ EXPORT_SYMBOL_GPL(tcp_send_ack);
* one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
* out-of-date with SND.UNA-1 to probe window.
*/
-static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
+static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
@@ -3400,6 +3405,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
*/
tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
skb_mstamp_get(&skb->skb_mstamp);
+ NET_INC_STATS_BH(sock_net(sk), mib);
return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
@@ -3407,12 +3413,12 @@ void tcp_send_window_probe(struct sock *sk)
{
if (sk->sk_state == TCP_ESTABLISHED) {
tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
- tcp_xmit_probe_skb(sk, 0);
+ tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
}
}
/* Initiate keepalive or window probe from timer. */
-int tcp_write_wakeup(struct sock *sk)
+int tcp_write_wakeup(struct sock *sk, int mib)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
@@ -3440,7 +3446,7 @@ int tcp_write_wakeup(struct sock *sk)
if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC))
return -1;
} else if (!tcp_skb_pcount(skb))
- tcp_set_skb_tso_segs(sk, skb, mss);
+ tcp_set_skb_tso_segs(skb, mss);
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
@@ -3449,8 +3455,8 @@ int tcp_write_wakeup(struct sock *sk)
return err;
} else {
if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
- tcp_xmit_probe_skb(sk, 1);
- return tcp_xmit_probe_skb(sk, 0);
+ tcp_xmit_probe_skb(sk, 1, mib);
+ return tcp_xmit_probe_skb(sk, 0, mib);
}
}
@@ -3464,7 +3470,7 @@ void tcp_send_probe0(struct sock *sk)
unsigned long probe_max;
int err;
- err = tcp_write_wakeup(sk);
+ err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
if (tp->packets_out || !tcp_send_head(sk)) {
/* Cancel probe timer, if it is not required. */
@@ -3490,7 +3496,7 @@ void tcp_send_probe0(struct sock *sk)
probe_max = TCP_RESOURCE_PROBE_INTERVAL;
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- inet_csk_rto_backoff(icsk, probe_max),
+ tcp_probe0_when(sk, probe_max),
TCP_RTO_MAX);
}
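The ECN fallback hunk above strips ECE/CWR from a SYN that is being
retransmitted, so a path that drops ECN-setup SYNs falls back to a
non-ECN handshake (RFC 3168, section 6.1.1.1). tcp_ecn_clear_syn() is
defined elsewhere in this series; a minimal sketch of the assumed
helper, where the sysctl gate is an assumption from the companion
ip-sysctl.txt change:

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	/* assumed knob from the same series; only fall back when enabled */
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared later, once the SYN-ACK
		 * (or its absence) is ultimately processed.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}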
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8c65dc147d8b..5b752f58a900 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -247,7 +247,7 @@ void tcp_delack_timer_handler(struct sock *sk)
}
out:
- if (sk_under_memory_pressure(sk))
+ if (tcp_under_memory_pressure(sk))
sk_mem_reclaim(sk);
}
@@ -616,7 +616,7 @@ static void tcp_keepalive_timer (unsigned long data)
tcp_write_err(sk);
goto out;
}
- if (tcp_write_wakeup(sk) <= 0) {
+ if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
icsk->icsk_probes_out++;
elapsed = keepalive_intvl_when(tp);
} else {
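The new mib argument lets the two tcp_write_wakeup() callers account
the resulting probe to separate SNMP counters: the probe0 timer passes
LINUX_MIB_TCPWINPROBE while the keepalive timer passes
LINUX_MIB_TCPKEEPALIVE, and tcp_xmit_probe_skb() bumps whichever it was
handed. The counters themselves are assumed to be added elsewhere in
this series; a sketch of the plumbing (tcp_probe_mibs is illustrative,
the real entries would sit inside snmp4_net_list[] in net/ipv4/proc.c):

static const struct snmp_mib tcp_probe_mibs[] = {
	SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE),
	SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
	SNMP_MIB_SENTINEL
};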
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 6bb98cc193c9..933ea903f7b8 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -15,12 +15,10 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
struct socket *sock = NULL;
struct sockaddr_in udp_addr;
- err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
+ err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
if (err < 0)
goto error;
- sk_change_net(sock->sk, net);
-
udp_addr.sin_family = AF_INET;
udp_addr.sin_addr = cfg->local_ip;
udp_addr.sin_port = cfg->local_udp_port;
@@ -47,7 +45,7 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
error:
if (sock) {
kernel_sock_shutdown(sock, SHUT_RDWR);
- sk_release_kernel(sock->sk);
+ sock_release(sock);
}
*sockp = NULL;
return err;
@@ -101,7 +99,7 @@ void udp_tunnel_sock_release(struct socket *sock)
{
rcu_assign_sk_user_data(sock->sk, NULL);
kernel_sock_shutdown(sock, SHUT_RDWR);
- sk_release_kernel(sock->sk);
+ sock_release(sock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);
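With sock_create_kern() now taking the netns directly, a kernel socket
is created in its namespace from the start and torn down with plain
sock_release(); the old sock_create_kern() + sk_change_net() two-step
and sk_release_kernel() are gone. A minimal sketch of the resulting
pattern (make_kernel_udp_sock is a hypothetical caller, not part of
the patch):

static int make_kernel_udp_sock(struct net *net, __be16 port,
				struct socket **sockp)
{
	struct sockaddr_in addr = {
		.sin_family	 = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port	 = port,
	};
	struct socket *sock;
	int err;

	/* the netns is pinned at creation time now */
	err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
	if (err < 0)
		return err;

	err = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
	if (err < 0) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);	/* replaces sk_release_kernel() */
		return err;
	}

	*sockp = sock;
	return 0;
}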
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 2e8c06108ab9..0f3f1999719a 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -48,4 +48,5 @@ obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
ifneq ($(CONFIG_IPV6),)
obj-$(CONFIG_NET_UDP_TUNNEL) += ip6_udp_tunnel.o
+obj-y += mcast_snoop.o
endif
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 37b70e82bff8..21c2c818df3b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2121,6 +2121,8 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
if (!fn)
goto out;
+
+ noflags |= RTF_CACHE;
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
if (rt->dst.dev->ifindex != dev->ifindex)
continue;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index eef63b394c5a..7de52b65173f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -167,7 +167,7 @@ lookup_protocol:
WARN_ON(!answer_prot->slab);
err = -ENOBUFS;
- sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot);
+ sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, kern);
if (!sk)
goto out;
@@ -362,7 +362,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
np->saddr = addr->sin6_addr;
/* Make sure we are allowed to bind here. */
- if (sk->sk_prot->get_port(sk, snum)) {
+ if ((snum || !inet->bind_address_no_port) &&
+ sk->sk_prot->get_port(sk, snum)) {
inet_reset_saddr(sk);
err = -EADDRINUSE;
goto out;
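This mirrors the IPv4 side: with the IP_BIND_ADDRESS_NO_PORT socket
option (assumed to be introduced in the same series) set and a zero
port, bind() only records the source address and leaves port
allocation to connect(), which avoids exhausting the ephemeral range
when many sockets bind to distinct source addresses. A hedged
userspace sketch, with placeholder addresses:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int bind_addr_only(int fd, const char *src)
{
	struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = 0 };
	int one = 1;

	inet_pton(AF_INET, src, &a.sin_addr);
	/* IP_BIND_ADDRESS_NO_PORT is an assumption from this series */
	if (setsockopt(fd, SOL_IP, IP_BIND_ADDRESS_NO_PORT,
		       &one, sizeof(one)) < 0)
		return -1;
	/* no local port is chosen here; connect() will pick one */
	return bind(fd, (struct sockaddr *)&a, sizeof(a));
}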
@@ -768,6 +769,7 @@ static int __net_init inet6_net_init(struct net *net)
net->ipv6.sysctl.auto_flowlabels = 0;
net->ipv6.sysctl.idgen_retries = 3;
net->ipv6.sysctl.idgen_delay = 1 * HZ;
+ net->ipv6.sysctl.flowlabel_state_ranges = 1;
atomic_set(&net->ipv6.fib6_sernum, 1);
err = ipv6_init_mibs(net);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 2c2b5d51f15c..713d7434c911 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -207,7 +207,7 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
struct inet_peer *peer;
peer = inet_getpeer_v6(net->ipv6.peers,
- &rt->rt6i_dst.addr, 1);
+ &fl6->daddr, 1);
res = inet_peer_xrlim_allow(peer, tmo);
if (peer)
inet_putpeer(peer);
@@ -337,7 +337,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
* We won't send icmp if the destination is known
* anycast.
*/
- if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
+ if (ipv6_anycast_destination(dst, &fl6->daddr)) {
net_dbg_ratelimited("icmp6_send: acast source\n");
dst_release(dst);
return ERR_PTR(-EINVAL);
@@ -564,7 +564,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
if (!ipv6_unicast_destination(skb) &&
!(net->ipv6.sysctl.anycast_src_echo_reply &&
- ipv6_anycast_destination(skb)))
+ ipv6_anycast_destination(skb_dst(skb), saddr)))
saddr = NULL;
memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 871641bc1ed4..b4fd96de97e6 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -257,7 +257,7 @@ not_unique:
return -EADDRNOTAVAIL;
}
-static inline u32 inet6_sk_port_offset(const struct sock *sk)
+static u32 inet6_sk_port_offset(const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
@@ -269,7 +269,11 @@ static inline u32 inet6_sk_port_offset(const struct sock *sk)
int inet6_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk)
{
- return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk),
+ u32 port_offset = 0;
+
+ if (!inet_sk(sk)->inet_num)
+ port_offset = inet6_sk_port_offset(sk);
+ return __inet_hash_connect(death_row, sk, port_offset,
__inet6_check_established);
}
EXPORT_SYMBOL_GPL(inet6_hash_connect);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index bde57b113009..55d19861ab20 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -154,10 +154,32 @@ static void node_free(struct fib6_node *fn)
kmem_cache_free(fib6_node_kmem, fn);
}
+static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
+{
+ int cpu;
+
+ if (!non_pcpu_rt->rt6i_pcpu)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct rt6_info **ppcpu_rt;
+ struct rt6_info *pcpu_rt;
+
+ ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
+ pcpu_rt = *ppcpu_rt;
+ if (pcpu_rt) {
+ dst_free(&pcpu_rt->dst);
+ *ppcpu_rt = NULL;
+ }
+ }
+}
+
static void rt6_release(struct rt6_info *rt)
{
- if (atomic_dec_and_test(&rt->rt6i_ref))
+ if (atomic_dec_and_test(&rt->rt6i_ref)) {
+ rt6_free_pcpu(rt);
dst_free(&rt->dst);
+ }
}
static void fib6_link_table(struct net *net, struct fib6_table *tb)
@@ -738,6 +760,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
rt6_clean_expires(iter);
else
rt6_set_expires(iter, rt->dst.expires);
+ iter->rt6i_pmtu = rt->rt6i_pmtu;
return -EEXIST;
}
/* If we have the same destination and the same metric,
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index d491125011c4..1f9ebe3cbb4a 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -595,6 +595,10 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
return -EINVAL;
+ if (net->ipv6.sysctl.flowlabel_state_ranges &&
+ (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
+ return -ERANGE;
+
fl = fl_create(net, sk, &freq, optval, optlen, &err);
if (!fl)
return err;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index bc09cb97b840..d5f7716662db 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -105,7 +105,7 @@ static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
}
rcu_read_lock_bh();
- nexthop = rt6_nexthop((struct rt6_info *)dst);
+ nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -459,7 +459,7 @@ int ip6_forward(struct sk_buff *skb)
else
target = &hdr->daddr;
- peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+ peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
/* Limit redirects both by destination (here)
and by source (inside ndisc_send_redirect)
@@ -551,7 +551,7 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
struct frag_hdr *fh;
unsigned int mtu, hlen, left, len;
int hroom, troom;
- __be32 frag_id = 0;
+ __be32 frag_id;
int ptr, offset = 0, err = 0;
u8 *prevhdr, nexthdr = 0;
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -564,18 +564,17 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
/* We must not fragment if the socket is set to force MTU discovery
* or if the skb is not generated by a local socket.
*/
- if (unlikely(!skb->ignore_df && skb->len > mtu) ||
- (IP6CB(skb)->frag_max_size &&
- IP6CB(skb)->frag_max_size > mtu)) {
- if (skb->sk && dst_allfrag(skb_dst(skb)))
- sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
+ if (unlikely(!skb->ignore_df && skb->len > mtu))
+ goto fail_toobig;
- skb->dev = skb_dst(skb)->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_FRAGFAILS);
- kfree_skb(skb);
- return -EMSGSIZE;
+ if (IP6CB(skb)->frag_max_size) {
+ if (IP6CB(skb)->frag_max_size > mtu)
+ goto fail_toobig;
+
+ /* don't send fragments larger than what we received */
+ mtu = IP6CB(skb)->frag_max_size;
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
}
if (np && np->frag_size < mtu) {
@@ -584,6 +583,9 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
}
mtu -= hlen + sizeof(struct frag_hdr);
+ frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
+ &ipv6_hdr(skb)->saddr);
+
if (skb_has_frag_list(skb)) {
int first_len = skb_pagelen(skb);
struct sk_buff *frag2;
@@ -632,11 +634,10 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
skb_reset_network_header(skb);
memcpy(skb_network_header(skb), tmp_hdr, hlen);
- ipv6_select_ident(net, fh, rt);
fh->nexthdr = nexthdr;
fh->reserved = 0;
fh->frag_off = htons(IP6_MF);
- frag_id = fh->identification;
+ fh->identification = frag_id;
first_len = skb_pagelen(skb);
skb->data_len = first_len - skb_headlen(skb);
@@ -778,11 +779,7 @@ slow_path:
*/
fh->nexthdr = nexthdr;
fh->reserved = 0;
- if (!frag_id) {
- ipv6_select_ident(net, fh, rt);
- frag_id = fh->identification;
- } else
- fh->identification = frag_id;
+ fh->identification = frag_id;
/*
* Copy a block of the IP datagram.
@@ -815,6 +812,14 @@ slow_path:
consume_skb(skb);
return err;
+fail_toobig:
+ if (skb->sk && dst_allfrag(skb_dst(skb)))
+ sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
+
+ skb->dev = skb_dst(skb)->dev;
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ err = -EMSGSIZE;
+
fail:
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
@@ -936,7 +941,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
*/
rt = (struct rt6_info *) *dst;
rcu_read_lock_bh();
- n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
+ n = __ipv6_neigh_lookup_noref(rt->dst.dev,
+ rt6_nexthop(rt, &fl6->daddr));
err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
rcu_read_unlock_bh();
@@ -1060,11 +1066,10 @@ static inline int ip6_ufo_append_data(struct sock *sk,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
int transhdrlen, int mtu, unsigned int flags,
- struct rt6_info *rt)
+ const struct flowi6 *fl6)
{
struct sk_buff *skb;
- struct frag_hdr fhdr;
int err;
/* There is support for UDP large send offload by network
@@ -1106,8 +1111,9 @@ static inline int ip6_ufo_append_data(struct sock *sk,
skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
sizeof(struct frag_hdr)) & ~7;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
- ipv6_select_ident(sock_net(sk), &fhdr, rt);
- skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
+ skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
+ &fl6->daddr,
+ &fl6->saddr);
append:
return skb_append_datato_frags(sk, skb, getfrag, from,
@@ -1332,7 +1338,7 @@ emsgsize:
(sk->sk_type == SOCK_DGRAM)) {
err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen,
- transhdrlen, mtu, flags, rt);
+ transhdrlen, mtu, flags, fl6);
if (err)
goto error;
return 0;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 5cafd92c2312..2e67b660118b 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -151,7 +151,7 @@ EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *) dst;
- t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ t->dst_cookie = rt6_get_cookie(rt);
dst_release(t->dst_cache);
t->dst_cache = dst;
}
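rt6_get_cookie() replaces the repeated open-coded fn_sernum reads in
this and later hunks (tcp_ipv6.c and xfrm6_policy.c below). It is
defined elsewhere in this series; the assumed shape, including the
indirection through dst.from for the DST_NOCACHE/RTF_PCPU clones
introduced by the route.c changes further down:

static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_PCPU ||
	    (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
		rt = (struct rt6_info *)(rt->dst.from);

	return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
}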
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index bba8903e871f..e1a1136bda7c 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -19,12 +19,10 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
int err;
struct socket *sock = NULL;
- err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
+ err = sock_create_kern(net, AF_INET6, SOCK_DGRAM, 0, &sock);
if (err < 0)
goto error;
- sk_change_net(sock->sk, net);
-
udp6_addr.sin6_family = AF_INET6;
memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
sizeof(udp6_addr.sin6_addr));
@@ -55,7 +53,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
error:
if (sock) {
kernel_sock_shutdown(sock, SHUT_RDWR);
- sk_release_kernel(sock->sk);
+ sock_release(sock);
}
*sockp = NULL;
return err;
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
new file mode 100644
index 000000000000..df8afe5ab31e
--- /dev/null
+++ b/net/ipv6/mcast_snoop.c
@@ -0,0 +1,213 @@
+/* Copyright (C) 2010: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+ * Copyright (C) 2015: Linus Lüssing <linus.luessing@c0d3.blue>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Based on the MLD support added to br_multicast.c by YOSHIFUJI Hideaki.
+ */
+
+#include <linux/skbuff.h>
+#include <net/ipv6.h>
+#include <net/mld.h>
+#include <net/addrconf.h>
+#include <net/ip6_checksum.h>
+
+static int ipv6_mc_check_ip6hdr(struct sk_buff *skb)
+{
+ const struct ipv6hdr *ip6h;
+ unsigned int len;
+ unsigned int offset = skb_network_offset(skb) + sizeof(*ip6h);
+
+ if (!pskb_may_pull(skb, offset))
+ return -EINVAL;
+
+ ip6h = ipv6_hdr(skb);
+
+ if (ip6h->version != 6)
+ return -EINVAL;
+
+ len = offset + ntohs(ip6h->payload_len);
+ if (skb->len < len || len <= offset)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipv6_mc_check_exthdrs(struct sk_buff *skb)
+{
+ const struct ipv6hdr *ip6h;
+ int offset;
+ u8 nexthdr;
+ __be16 frag_off;
+
+ ip6h = ipv6_hdr(skb);
+
+ if (ip6h->nexthdr != IPPROTO_HOPOPTS)
+ return -ENOMSG;
+
+ nexthdr = ip6h->nexthdr;
+ offset = skb_network_offset(skb) + sizeof(*ip6h);
+ offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+
+ if (offset < 0)
+ return -EINVAL;
+
+ if (nexthdr != IPPROTO_ICMPV6)
+ return -ENOMSG;
+
+ skb_set_transport_header(skb, offset);
+
+ return 0;
+}
+
+static int ipv6_mc_check_mld_reportv2(struct sk_buff *skb)
+{
+ unsigned int len = skb_transport_offset(skb);
+
+ len += sizeof(struct mld2_report);
+
+ return pskb_may_pull(skb, len) ? 0 : -EINVAL;
+}
+
+static int ipv6_mc_check_mld_query(struct sk_buff *skb)
+{
+ struct mld_msg *mld;
+ unsigned int len = skb_transport_offset(skb);
+
+ /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
+ if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
+ return -EINVAL;
+
+ len += sizeof(struct mld_msg);
+ if (skb->len < len)
+ return -EINVAL;
+
+ /* MLDv1? */
+ if (skb->len != len) {
+ /* or MLDv2? */
+ len += sizeof(struct mld2_query) - sizeof(struct mld_msg);
+ if (skb->len < len || !pskb_may_pull(skb, len))
+ return -EINVAL;
+ }
+
+ mld = (struct mld_msg *)skb_transport_header(skb);
+
+ /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
+ * all-nodes destination address (ff02::1) for general queries
+ */
+ if (ipv6_addr_any(&mld->mld_mca) &&
+ !ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
+{
+ struct mld_msg *mld = (struct mld_msg *)skb_transport_header(skb);
+
+ switch (mld->mld_type) {
+ case ICMPV6_MGM_REDUCTION:
+ case ICMPV6_MGM_REPORT:
+ /* fall through */
+ return 0;
+ case ICMPV6_MLD2_REPORT:
+ return ipv6_mc_check_mld_reportv2(skb);
+ case ICMPV6_MGM_QUERY:
+ return ipv6_mc_check_mld_query(skb);
+ default:
+ return -ENOMSG;
+ }
+}
+
+static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
+{
+ return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
+}
+
+static int __ipv6_mc_check_mld(struct sk_buff *skb,
+ struct sk_buff **skb_trimmed)
+{
+ struct sk_buff *skb_chk = NULL;
+ unsigned int transport_len;
+ unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
+ int ret;
+
+ transport_len = ntohs(ipv6_hdr(skb)->payload_len);
+ transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
+
+ skb_get(skb);
+ skb_chk = skb_checksum_trimmed(skb, transport_len,
+ ipv6_mc_validate_checksum);
+ if (!skb_chk)
+ return -EINVAL;
+
+ if (!pskb_may_pull(skb_chk, len)) {
+ kfree_skb(skb_chk);
+ return -EINVAL;
+ }
+
+ ret = ipv6_mc_check_mld_msg(skb_chk);
+ if (ret) {
+ kfree_skb(skb_chk);
+ return ret;
+ }
+
+ if (skb_trimmed)
+ *skb_trimmed = skb_chk;
+ else
+ kfree_skb(skb_chk);
+
+ return 0;
+}
+
+/**
+ * ipv6_mc_check_mld - checks whether this is a sane MLD packet
+ * @skb: the skb to validate
+ * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
+ *
+ * Checks whether an IPv6 packet is a valid MLD packet. If so, it sets
+ * the skb network and transport headers accordingly and returns zero.
+ *
+ * -EINVAL: A broken packet was detected, i.e. it violates some internet
+ * standard
+ * -ENOMSG: IP header validation succeeded but it is not an MLD packet.
+ * -ENOMEM: A memory allocation failure happened.
+ *
+ * Optionally, an skb pointer may be provided via skb_trimmed (or it may
+ * be NULL): after parsing an MLD packet successfully it will point to
+ * an skb which has its tail aligned to the IP packet end. This might
+ * either be the originally provided skb or a trimmed, cloned version if
+ * the skb frame had data beyond the IP packet. A cloned skb allows us
+ * to leave the original skb and its full frame unchanged (which might be
+ * desirable for layer 2 frame jugglers).
+ *
+ * The caller needs to release a reference count from any returned skb_trimmed.
+ */
+int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+{
+ int ret;
+
+ ret = ipv6_mc_check_ip6hdr(skb);
+ if (ret < 0)
+ return ret;
+
+ ret = ipv6_mc_check_exthdrs(skb);
+ if (ret < 0)
+ return ret;
+
+ return __ipv6_mc_check_mld(skb, skb_trimmed);
+}
+EXPORT_SYMBOL(ipv6_mc_check_mld);
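A hedged caller sketch for the exported helper; the bridge multicast
snooping code is the intended consumer, but snoop_mld() and its error
handling here are illustrative only:

static int snoop_mld(struct sk_buff *skb)
{
	struct sk_buff *skb_trimmed = NULL;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);
	if (err == -ENOMSG)
		return 0;	/* valid IPv6, just not MLD */
	if (err < 0)
		return err;	/* malformed packet or allocation failure */

	/* skb_trimmed's tail is aligned to the IP packet end */
	mld = (struct mld_msg *)skb_transport_header(skb_trimmed);
	pr_debug("MLD type %u\n", mld->mld_type);

	/* release the reference taken on our behalf */
	kfree_skb(skb_trimmed);
	return 0;
}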
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 96f153c0846b..0a05b35a90fc 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1506,7 +1506,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
"Redirect: destination is not a neighbour\n");
goto release;
}
- peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+ peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr, 1);
ret = inet_peer_xrlim_allow(peer, 1*HZ);
if (peer)
inet_putpeer(peer);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 62f5b0d0bc9b..cdd085f8b770 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1459,7 +1459,6 @@ static int
compat_find_calc_match(struct xt_entry_match *m,
const char *name,
const struct ip6t_ip6 *ipv6,
- unsigned int hookmask,
int *size)
{
struct xt_match *match;
@@ -1528,8 +1527,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
- ret = compat_find_calc_match(ematch, name,
- &e->ipv6, e->comefrom, &off);
+ ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
if (ret != 0)
goto release_matches;
++j;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 85892af57364..21678acd4521 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -10,7 +10,8 @@
#include <net/secure_seq.h>
static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
- struct in6_addr *dst, struct in6_addr *src)
+ const struct in6_addr *dst,
+ const struct in6_addr *src)
{
u32 hash, id;
@@ -60,17 +61,17 @@ void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
-void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr,
- struct rt6_info *rt)
+__be32 ipv6_select_ident(struct net *net,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr)
{
static u32 ip6_idents_hashrnd __read_mostly;
u32 id;
net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
- id = __ipv6_select_ident(net, ip6_idents_hashrnd, &rt->rt6i_dst.addr,
- &rt->rt6i_src.addr);
- fhdr->identification = htonl(id);
+ id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr);
+ return htonl(id);
}
EXPORT_SYMBOL(ipv6_select_ident);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 8072bd4139b7..ca4700cb26c4 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -865,6 +865,9 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ if (inet->hdrincl)
+ fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
+
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
@@ -1324,13 +1327,7 @@ static struct inet_protosw rawv6_protosw = {
int __init rawv6_init(void)
{
- int ret;
-
- ret = inet6_register_protosw(&rawv6_protosw);
- if (ret)
- goto out;
-out:
- return ret;
+ return inet6_register_protosw(&rawv6_protosw);
}
void rawv6_exit(void)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c73ae5039e46..1a1122a6bbf5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -72,8 +72,7 @@ enum rt6_nud_state {
RT6_NUD_SUCCEED = 1
};
-static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
- const struct in6_addr *dest);
+static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
@@ -92,6 +91,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
+static void rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
#ifdef CONFIG_IPV6_ROUTE_INFO
@@ -104,65 +104,82 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *gwaddr, int ifindex);
#endif
-static void rt6_bind_peer(struct rt6_info *rt, int create)
+struct uncached_list {
+ spinlock_t lock;
+ struct list_head head;
+};
+
+static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
+
+static void rt6_uncached_list_add(struct rt6_info *rt)
{
- struct inet_peer_base *base;
- struct inet_peer *peer;
+ struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
- base = inetpeer_base_ptr(rt->_rt6i_peer);
- if (!base)
- return;
+ rt->dst.flags |= DST_NOCACHE;
+ rt->rt6i_uncached_list = ul;
+
+ spin_lock_bh(&ul->lock);
+ list_add_tail(&rt->rt6i_uncached, &ul->head);
+ spin_unlock_bh(&ul->lock);
+}
- peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
- if (peer) {
- if (!rt6_set_peer(rt, peer))
- inet_putpeer(peer);
+static void rt6_uncached_list_del(struct rt6_info *rt)
+{
+ if (!list_empty(&rt->rt6i_uncached)) {
+ struct uncached_list *ul = rt->rt6i_uncached_list;
+
+ spin_lock_bh(&ul->lock);
+ list_del(&rt->rt6i_uncached);
+ spin_unlock_bh(&ul->lock);
}
}
-static struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
+static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
- if (rt6_has_peer(rt))
- return rt6_peer_ptr(rt);
+ struct net_device *loopback_dev = net->loopback_dev;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
+ struct rt6_info *rt;
+
+ spin_lock_bh(&ul->lock);
+ list_for_each_entry(rt, &ul->head, rt6i_uncached) {
+ struct inet6_dev *rt_idev = rt->rt6i_idev;
+ struct net_device *rt_dev = rt->dst.dev;
+
+ if (rt_idev && (rt_idev->dev == dev || !dev) &&
+ rt_idev->dev != loopback_dev) {
+ rt->rt6i_idev = in6_dev_get(loopback_dev);
+ in6_dev_put(rt_idev);
+ }
- rt6_bind_peer(rt, create);
- return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
+ if (rt_dev && (rt_dev == dev || !dev) &&
+ rt_dev != loopback_dev) {
+ rt->dst.dev = loopback_dev;
+ dev_hold(rt->dst.dev);
+ dev_put(rt_dev);
+ }
+ }
+ spin_unlock_bh(&ul->lock);
+ }
}
-static struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
+static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
{
- return __rt6_get_peer(rt, 1);
+ return dst_metrics_write_ptr(rt->dst.from);
}
static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
- struct rt6_info *rt = (struct rt6_info *) dst;
- struct inet_peer *peer;
- u32 *p = NULL;
+ struct rt6_info *rt = (struct rt6_info *)dst;
- if (!(rt->dst.flags & DST_HOST))
+ if (rt->rt6i_flags & RTF_PCPU)
+ return rt6_pcpu_cow_metrics(rt);
+ else if (rt->rt6i_flags & RTF_CACHE)
+ return NULL;
+ else
return dst_cow_metrics_generic(dst, old);
-
- peer = rt6_get_peer_create(rt);
- if (peer) {
- u32 *old_p = __DST_METRICS_PTR(old);
- unsigned long prev, new;
-
- p = peer->metrics;
- if (inet_metrics_new(peer) ||
- (old & DST_METRICS_FORCE_OVERWRITE))
- memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
-
- new = (unsigned long) p;
- prev = cmpxchg(&dst->_metrics, old, new);
-
- if (prev != old) {
- p = __DST_METRICS_PTR(prev);
- if (prev & DST_METRICS_READ_ONLY)
- p = NULL;
- }
- }
- return p;
}
static inline const void *choose_neigh_daddr(struct rt6_info *rt,
@@ -299,10 +316,10 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
#endif
/* allocate dst with ip6_dst_ops */
-static inline struct rt6_info *ip6_dst_alloc(struct net *net,
- struct net_device *dev,
- int flags,
- struct fib6_table *table)
+static struct rt6_info *__ip6_dst_alloc(struct net *net,
+ struct net_device *dev,
+ int flags,
+ struct fib6_table *table)
{
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -311,21 +328,54 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
struct dst_entry *dst = &rt->dst;
memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
- rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
INIT_LIST_HEAD(&rt->rt6i_siblings);
+ INIT_LIST_HEAD(&rt->rt6i_uncached);
+ }
+ return rt;
+}
+
+static struct rt6_info *ip6_dst_alloc(struct net *net,
+ struct net_device *dev,
+ int flags,
+ struct fib6_table *table)
+{
+ struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+
+ if (rt) {
+ rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
+ if (rt->rt6i_pcpu) {
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct rt6_info **p;
+
+ p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
+ /* no one shares rt */
+ *p = NULL;
+ }
+ } else {
+ dst_destroy((struct dst_entry *)rt);
+ return NULL;
+ }
}
+
return rt;
}
static void ip6_dst_destroy(struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *)dst;
- struct inet6_dev *idev = rt->rt6i_idev;
struct dst_entry *from = dst->from;
+ struct inet6_dev *idev;
+
+ dst_destroy_metrics_generic(dst);
- if (!(rt->dst.flags & DST_HOST))
- dst_destroy_metrics_generic(dst);
+ if (rt->rt6i_pcpu)
+ free_percpu(rt->rt6i_pcpu);
+ rt6_uncached_list_del(rt);
+
+ idev = rt->rt6i_idev;
if (idev) {
rt->rt6i_idev = NULL;
in6_dev_put(idev);
@@ -333,11 +383,6 @@ static void ip6_dst_destroy(struct dst_entry *dst)
dst->from = NULL;
dst_release(from);
-
- if (rt6_has_peer(rt)) {
- struct inet_peer *peer = rt6_peer_ptr(rt);
- inet_putpeer(peer);
- }
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -652,15 +697,33 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
u32 metric, int oif, int strict,
bool *do_rr)
{
- struct rt6_info *rt, *match;
+ struct rt6_info *rt, *match, *cont;
int mpri = -1;
match = NULL;
- for (rt = rr_head; rt && rt->rt6i_metric == metric;
- rt = rt->dst.rt6_next)
+ cont = NULL;
+ for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
+ if (rt->rt6i_metric != metric) {
+ cont = rt;
+ break;
+ }
+
match = find_match(rt, oif, strict, &mpri, match, do_rr);
- for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
- rt = rt->dst.rt6_next)
+ }
+
+ for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
+ if (rt->rt6i_metric != metric) {
+ cont = rt;
+ break;
+ }
+
+ match = find_match(rt, oif, strict, &mpri, match, do_rr);
+ }
+
+ if (match || !cont)
+ return match;
+
+ for (rt = cont; rt; rt = rt->dst.rt6_next)
match = find_match(rt, oif, strict, &mpri, match, do_rr);
return match;
@@ -694,6 +757,11 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
return match ? match : net->ipv6.ip6_null_entry;
}
+static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
+{
+ return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
+}
+
#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
const struct in6_addr *gwaddr)
@@ -872,9 +940,9 @@ int ip6_ins_rt(struct rt6_info *rt)
return __ip6_ins_rt(rt, &info, &mxc);
}
-static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr)
+static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr)
{
struct rt6_info *rt;
@@ -882,15 +950,26 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
* Clone the route.
*/
- rt = ip6_rt_copy(ort, daddr);
+ if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
+ ort = (struct rt6_info *)ort->dst.from;
- if (rt) {
+ rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
+ 0, ort->rt6i_table);
+
+ if (!rt)
+ return NULL;
+
+ ip6_rt_copy_init(rt, ort);
+ rt->rt6i_flags |= RTF_CACHE;
+ rt->rt6i_metric = 0;
+ rt->dst.flags |= DST_HOST;
+ rt->rt6i_dst.addr = *daddr;
+ rt->rt6i_dst.plen = 128;
+
+ if (!rt6_is_gw_or_nonexthop(ort)) {
if (ort->rt6i_dst.plen != 128 &&
ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
rt->rt6i_flags |= RTF_ANYCAST;
-
- rt->rt6i_flags |= RTF_CACHE;
-
#ifdef CONFIG_IPV6_SUBTREES
if (rt->rt6i_src.plen && saddr) {
rt->rt6i_src.addr = *saddr;
@@ -902,30 +981,65 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
return rt;
}
-static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
- const struct in6_addr *daddr)
+static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
{
- struct rt6_info *rt = ip6_rt_copy(ort, daddr);
+ struct rt6_info *pcpu_rt;
- if (rt)
- rt->rt6i_flags |= RTF_CACHE;
- return rt;
+ pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
+ rt->dst.dev, rt->dst.flags,
+ rt->rt6i_table);
+
+ if (!pcpu_rt)
+ return NULL;
+ ip6_rt_copy_init(pcpu_rt, rt);
+ pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
+ pcpu_rt->rt6i_flags |= RTF_PCPU;
+ return pcpu_rt;
+}
+
+/* It should be called with read_lock_bh(&tb6_lock) acquired */
+static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
+{
+ struct rt6_info *pcpu_rt, *prev, **p;
+
+ p = this_cpu_ptr(rt->rt6i_pcpu);
+ pcpu_rt = *p;
+
+ if (pcpu_rt)
+ goto done;
+
+ pcpu_rt = ip6_rt_pcpu_alloc(rt);
+ if (!pcpu_rt) {
+ struct net *net = dev_net(rt->dst.dev);
+
+ pcpu_rt = net->ipv6.ip6_null_entry;
+ goto done;
+ }
+
+ prev = cmpxchg(p, NULL, pcpu_rt);
+ if (prev) {
+ /* If someone did it before us, return prev instead */
+ dst_destroy(&pcpu_rt->dst);
+ pcpu_rt = prev;
+ }
+
+done:
+ dst_hold(&pcpu_rt->dst);
+ rt6_dst_from_metrics_check(pcpu_rt);
+ return pcpu_rt;
}
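rt6_get_pcpu_route() above uses the usual lockless-install idiom:
allocate outside the per-cpu slot, publish with cmpxchg(), and destroy
the loser when another CPU won the race. The generic form, with
hypothetical names (obj, obj_alloc, obj_free are not in the patch):

static struct obj *get_or_install(struct obj **slot)
{
	struct obj *new, *prev;

	new = obj_alloc();
	if (!new)
		return NULL;	/* caller picks a fallback */

	prev = cmpxchg(slot, NULL, new);
	if (prev) {
		obj_free(new);	/* we lost the race; use the winner */
		new = prev;
	}
	return new;
}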
static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
struct flowi6 *fl6, int flags)
{
struct fib6_node *fn, *saved_fn;
- struct rt6_info *rt, *nrt;
+ struct rt6_info *rt;
int strict = 0;
- int attempts = 3;
- int err;
strict |= flags & RT6_LOOKUP_F_IFACE;
if (net->ipv6.devconf_all->forwarding == 0)
strict |= RT6_LOOKUP_F_REACHABLE;
-redo_fib6_lookup_lock:
read_lock_bh(&table->tb6_lock);
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
@@ -944,51 +1058,52 @@ redo_rt6_select:
strict &= ~RT6_LOOKUP_F_REACHABLE;
fn = saved_fn;
goto redo_rt6_select;
- } else {
- dst_hold(&rt->dst);
- read_unlock_bh(&table->tb6_lock);
- goto out2;
}
}
- dst_hold(&rt->dst);
- read_unlock_bh(&table->tb6_lock);
- if (rt->rt6i_flags & RTF_CACHE)
- goto out2;
+ if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
+ dst_use(&rt->dst, jiffies);
+ read_unlock_bh(&table->tb6_lock);
- if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
- nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
- else if (!(rt->dst.flags & DST_HOST))
- nrt = rt6_alloc_clone(rt, &fl6->daddr);
- else
- goto out2;
+ rt6_dst_from_metrics_check(rt);
+ return rt;
+ } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
+ !(rt->rt6i_flags & RTF_GATEWAY))) {
+ /* Create a RTF_CACHE clone which will not be
+ * owned by the fib6 tree. It is for the special case where
+ * the daddr in the skb during the neighbor look-up is different
+ * from the fl6->daddr used to look up the route here.
+ */
- ip6_rt_put(rt);
- rt = nrt ? : net->ipv6.ip6_null_entry;
+ struct rt6_info *uncached_rt;
- dst_hold(&rt->dst);
- if (nrt) {
- err = ip6_ins_rt(nrt);
- if (!err)
- goto out2;
- }
+ dst_use(&rt->dst, jiffies);
+ read_unlock_bh(&table->tb6_lock);
- if (--attempts <= 0)
- goto out2;
+ uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
+ dst_release(&rt->dst);
- /*
- * Race condition! In the gap, when table->tb6_lock was
- * released someone could insert this route. Relookup.
- */
- ip6_rt_put(rt);
- goto redo_fib6_lookup_lock;
+ if (uncached_rt)
+ rt6_uncached_list_add(uncached_rt);
+ else
+ uncached_rt = net->ipv6.ip6_null_entry;
-out2:
- rt->dst.lastuse = jiffies;
- rt->dst.__use++;
+ dst_hold(&uncached_rt->dst);
+ return uncached_rt;
- return rt;
+ } else {
+ /* Get a percpu copy */
+
+ struct rt6_info *pcpu_rt;
+
+ rt->dst.lastuse = jiffies;
+ rt->dst.__use++;
+ pcpu_rt = rt6_get_pcpu_route(rt);
+ read_unlock_bh(&table->tb6_lock);
+
+ return pcpu_rt;
+ }
}
static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
@@ -1059,7 +1174,6 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
new = &rt->dst;
memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
- rt6_init_peer(rt, net->ipv6.peers);
new->__use = 1;
new->input = dst_discard;
@@ -1093,6 +1207,33 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
* Destination cache support functions
*/
+static void rt6_dst_from_metrics_check(struct rt6_info *rt)
+{
+ if (rt->dst.from &&
+ dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
+ dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
+}
+
+static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
+{
+ if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+ return NULL;
+
+ if (rt6_check_expired(rt))
+ return NULL;
+
+ return &rt->dst;
+}
+
+static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
+{
+ if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+ rt6_check((struct rt6_info *)(rt->dst.from), cookie))
+ return &rt->dst;
+ else
+ return NULL;
+}
+
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
struct rt6_info *rt;
@@ -1103,13 +1244,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
* DST_OBSOLETE_FORCE_CHK which forces validation calls down
* into this function always.
*/
- if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
- return NULL;
- if (rt6_check_expired(rt))
- return NULL;
+ rt6_dst_from_metrics_check(rt);
- return dst;
+ if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE))
+ return rt6_dst_from_check(rt, cookie);
+ else
+ return rt6_check(rt, cookie);
}
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
@@ -1148,24 +1289,63 @@ static void ip6_link_failure(struct sk_buff *skb)
}
}
-static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
+static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
+{
+ struct net *net = dev_net(rt->dst.dev);
+
+ rt->rt6i_flags |= RTF_MODIFIED;
+ rt->rt6i_pmtu = mtu;
+ rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
+}
+
+static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
+ const struct ipv6hdr *iph, u32 mtu)
{
struct rt6_info *rt6 = (struct rt6_info *)dst;
- dst_confirm(dst);
- if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
- struct net *net = dev_net(dst->dev);
+ if (rt6->rt6i_flags & RTF_LOCAL)
+ return;
- rt6->rt6i_flags |= RTF_MODIFIED;
- if (mtu < IPV6_MIN_MTU)
- mtu = IPV6_MIN_MTU;
+ dst_confirm(dst);
+ mtu = max_t(u32, mtu, IPV6_MIN_MTU);
+ if (mtu >= dst_mtu(dst))
+ return;
- dst_metric_set(dst, RTAX_MTU, mtu);
- rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
+ if (rt6->rt6i_flags & RTF_CACHE) {
+ rt6_do_update_pmtu(rt6, mtu);
+ } else {
+ const struct in6_addr *daddr, *saddr;
+ struct rt6_info *nrt6;
+
+ if (iph) {
+ daddr = &iph->daddr;
+ saddr = &iph->saddr;
+ } else if (sk) {
+ daddr = &sk->sk_v6_daddr;
+ saddr = &inet6_sk(sk)->saddr;
+ } else {
+ return;
+ }
+ nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
+ if (nrt6) {
+ rt6_do_update_pmtu(nrt6, mtu);
+
+ /* ip6_ins_rt(nrt6) will bump the
+ * rt6->rt6i_node->fn_sernum
+ * which will fail the next rt6_check() and
+ * invalidate the sk->sk_dst_cache.
+ */
+ ip6_ins_rt(nrt6);
+ }
}
}
+static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
+{
+ __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
+}
+
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
int oif, u32 mark)
{
@@ -1182,7 +1362,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
dst = ip6_route_output(net, NULL, &fl6);
if (!dst->error)
- ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
+ __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);
@@ -1341,12 +1521,17 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
+ const struct rt6_info *rt = (const struct rt6_info *)dst;
+ unsigned int mtu = rt->rt6i_pmtu;
struct inet6_dev *idev;
- unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
goto out;
+ mtu = dst_metric_raw(dst, RTAX_MTU);
+ if (mtu)
+ goto out;
+
mtu = IPV6_MIN_MTU;
rcu_read_lock();
@@ -1590,10 +1775,8 @@ int ip6_route_add(struct fib6_config *cfg)
ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt->rt6i_dst.plen = cfg->fc_dst_len;
- if (rt->rt6i_dst.plen == 128) {
+ if (rt->rt6i_dst.plen == 128)
rt->dst.flags |= DST_HOST;
- dst_metrics_set_force_overwrite(&rt->dst);
- }
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1651,6 +1834,16 @@ int ip6_route_add(struct fib6_config *cfg)
int gwa_type;
gw_addr = &cfg->fc_gateway;
+
+ /* if gw_addr is local we will fail to detect this in case the
+ * address is still TENTATIVE (DAD in progress). rt6_lookup()
+ * will return an already-added prefix route via the interface
+ * that the prefix route was assigned to, which might be non-loopback.
+ */
+ err = -EINVAL;
+ if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0))
+ goto out;
+
rt->rt6i_gateway = *gw_addr;
gwa_type = ipv6_addr_type(gw_addr);
@@ -1664,7 +1857,6 @@ int ip6_route_add(struct fib6_config *cfg)
(SIT, PtP, NBMA NOARP links) it is handy to allow
some exceptions. --ANK
*/
- err = -EINVAL;
if (!(gwa_type & IPV6_ADDR_UNICAST))
goto out;
@@ -1785,6 +1977,9 @@ static int ip6_route_del(struct fib6_config *cfg)
if (fn) {
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
+ if ((rt->rt6i_flags & RTF_CACHE) &&
+ !(cfg->fc_flags & RTF_CACHE))
+ continue;
if (cfg->fc_ifindex &&
(!rt->dst.dev ||
rt->dst.dev->ifindex != cfg->fc_ifindex))
@@ -1894,7 +2089,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
NEIGH_UPDATE_F_ISROUTER))
);
- nrt = ip6_rt_copy(rt, &msg->dest);
+ nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
if (!nrt)
goto out;
@@ -1926,42 +2121,35 @@ out:
* Misc support functions
*/
-static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
- const struct in6_addr *dest)
+static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
- struct net *net = dev_net(ort->dst.dev);
- struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
- ort->rt6i_table);
+ BUG_ON(from->dst.from);
- if (rt) {
- rt->dst.input = ort->dst.input;
- rt->dst.output = ort->dst.output;
- rt->dst.flags |= DST_HOST;
-
- rt->rt6i_dst.addr = *dest;
- rt->rt6i_dst.plen = 128;
- dst_copy_metrics(&rt->dst, &ort->dst);
- rt->dst.error = ort->dst.error;
- rt->rt6i_idev = ort->rt6i_idev;
- if (rt->rt6i_idev)
- in6_dev_hold(rt->rt6i_idev);
- rt->dst.lastuse = jiffies;
-
- if (ort->rt6i_flags & RTF_GATEWAY)
- rt->rt6i_gateway = ort->rt6i_gateway;
- else
- rt->rt6i_gateway = *dest;
- rt->rt6i_flags = ort->rt6i_flags;
- rt6_set_from(rt, ort);
- rt->rt6i_metric = 0;
+ rt->rt6i_flags &= ~RTF_EXPIRES;
+ dst_hold(&from->dst);
+ rt->dst.from = &from->dst;
+ dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
+}
+static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
+{
+ rt->dst.input = ort->dst.input;
+ rt->dst.output = ort->dst.output;
+ rt->rt6i_dst = ort->rt6i_dst;
+ rt->dst.error = ort->dst.error;
+ rt->rt6i_idev = ort->rt6i_idev;
+ if (rt->rt6i_idev)
+ in6_dev_hold(rt->rt6i_idev);
+ rt->dst.lastuse = jiffies;
+ rt->rt6i_gateway = ort->rt6i_gateway;
+ rt->rt6i_flags = ort->rt6i_flags;
+ rt6_set_from(rt, ort);
+ rt->rt6i_metric = ort->rt6i_metric;
#ifdef CONFIG_IPV6_SUBTREES
- memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
+ rt->rt6i_src = ort->rt6i_src;
#endif
- memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
- rt->rt6i_table = ort->rt6i_table;
- }
- return rt;
+ rt->rt6i_prefsrc = ort->rt6i_prefsrc;
+ rt->rt6i_table = ort->rt6i_table;
}
#ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2336,6 +2524,7 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
fib6_clean_all(net, fib6_ifdown, &adn);
icmp6_clean_all(fib6_ifdown, &adn);
+ rt6_uncached_list_flush_dev(net, dev);
}
struct rt6_mtu_change_arg {
@@ -2373,11 +2562,20 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
PMTU discovery.
*/
if (rt->dst.dev == arg->dev &&
- !dst_metric_locked(&rt->dst, RTAX_MTU) &&
- (dst_mtu(&rt->dst) >= arg->mtu ||
- (dst_mtu(&rt->dst) < arg->mtu &&
- dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
- dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
+ !dst_metric_locked(&rt->dst, RTAX_MTU)) {
+ if (rt->rt6i_flags & RTF_CACHE) {
+ /* For RTF_CACHE with rt6i_pmtu == 0
+ * (i.e. a redirected route),
+ * the metrics of its rt->dst.from have already
+ * been updated.
+ */
+ if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
+ rt->rt6i_pmtu = arg->mtu;
+ } else if (dst_mtu(&rt->dst) >= arg->mtu ||
+ (dst_mtu(&rt->dst) < arg->mtu &&
+ dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
+ dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
+ }
}
return 0;
}
@@ -2434,6 +2632,9 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
if (rtm->rtm_type == RTN_LOCAL)
cfg->fc_flags |= RTF_LOCAL;
+ if (rtm->rtm_flags & RTM_F_CLONED)
+ cfg->fc_flags |= RTF_CACHE;
+
cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
cfg->fc_nlinfo.nlh = nlh;
cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
@@ -2608,6 +2809,7 @@ static int rt6_fill_node(struct net *net,
int iif, int type, u32 portid, u32 seq,
int prefix, int nowait, unsigned int flags)
{
+ u32 metrics[RTAX_MAX];
struct rtmsg *rtm;
struct nlmsghdr *nlh;
long expires;
@@ -2721,7 +2923,10 @@ static int rt6_fill_node(struct net *net,
goto nla_put_failure;
}
- if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
+ memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
+ if (rt->rt6i_pmtu)
+ metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
+ if (rtnetlink_put_metrics(skb, metrics) < 0)
goto nla_put_failure;
if (rt->rt6i_flags & RTF_GATEWAY) {
@@ -3216,6 +3421,7 @@ static struct notifier_block ip6_route_dev_notifier = {
int __init ip6_route_init(void)
{
int ret;
+ int cpu;
ret = -ENOMEM;
ip6_dst_ops_template.kmem_cachep =
@@ -3275,6 +3481,13 @@ int __init ip6_route_init(void)
if (ret)
goto out_register_late_subsys;
+ for_each_possible_cpu(cpu) {
+ struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
+
+ INIT_LIST_HEAD(&ul->head);
+ spin_lock_init(&ul->lock);
+ }
+
out:
return ret;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 21bc2eb53c57..0909f4e0d53c 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -41,23 +41,6 @@ static __u16 const msstab[] = {
9000 - 60,
};
-static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
- struct request_sock *req,
- struct dst_entry *dst)
-{
- struct inet_connection_sock *icsk = inet_csk(sk);
- struct sock *child;
-
- child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
- if (child) {
- atomic_set(&req->rsk_refcnt, 1);
- inet_csk_reqsk_queue_add(sk, req, child);
- } else {
- reqsk_free(req);
- }
- return child;
-}
-
static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
ipv6_cookie_scratch);
@@ -264,7 +247,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq->rcv_wscale = rcv_wscale;
ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);
- ret = get_cookie_sock(sk, skb, req, dst);
+ ret = tcp_get_cookie_sock(sk, skb, req, dst);
out:
return ret;
out_free:
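The helper removed above is assumed to reappear as a shared
tcp_get_cookie_sock(), exported for both address families with the same
body; its exact location (likely net/ipv4/syncookies.c) is an
assumption:

struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *child;

	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
	if (child) {
		atomic_set(&req->rsk_refcnt, 1);
		inet_csk_reqsk_queue_add(sk, req, child);
	} else {
		reqsk_free(req);
	}
	return child;
}
EXPORT_SYMBOL(tcp_get_cookie_sock);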
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index abcc79f649b3..4e705add4f18 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -68,6 +68,13 @@ static struct ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+ {
+ .procname = "flowlabel_state_ranges",
+ .data = &init_net.ipv6.sysctl.flowlabel_state_ranges,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
{ }
};
@@ -109,6 +116,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;
ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
+ ipv6_table[7].data = &net->ipv6.sysctl.flowlabel_state_ranges;
ipv6_route_table = ipv6_route_sysctl_init(net);
if (!ipv6_route_table)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3adffb300238..6748c4277aff 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -99,8 +99,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
dst_hold(dst);
sk->sk_rx_dst = dst;
inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
- if (rt->rt6i_node)
- inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+ inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
}
}
@@ -121,7 +120,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct in6_addr *saddr = NULL, *final_p, final;
- struct rt6_info *rt;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_type;
@@ -259,10 +257,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
sk->sk_gso_type = SKB_GSO_TCPV6;
__ip6_dst_store(sk, dst, NULL, NULL);
- rt = (struct rt6_info *) dst;
if (tcp_death_row.sysctl_tw_recycle &&
!tp->rx_opt.ts_recent_stamp &&
- ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
+ ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
tcp_fetch_timewait_stamp(sk, dst);
icsk->icsk_ext_hdr_len = 0;
@@ -1251,7 +1248,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
- if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
+ if (tcp_checksum_complete(skb))
goto csum_err;
if (sk->sk_state == TCP_LISTEN) {
@@ -1421,6 +1418,7 @@ process:
skb->dev = NULL;
bh_lock_sock_nested(sk);
+ tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
ret = 0;
if (!sock_owned_by_user(sk)) {
if (!tcp_prequeue(sk, skb))
@@ -1442,7 +1440,7 @@ no_tcp_socket:
tcp_v6_fill_cb(skb, hdr, th);
- if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
+ if (tcp_checksum_complete(skb)) {
csum_error:
TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
@@ -1467,10 +1465,6 @@ do_time_wait:
tcp_v6_fill_cb(skb, hdr, th);
- if (skb->len < (th->doff<<2)) {
- inet_twsk_put(inet_twsk(sk));
- goto bad_packet;
- }
if (tcp_checksum_complete(skb)) {
inet_twsk_put(inet_twsk(sk));
goto csum_error;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index f337a908a76a..ed0583c1b9fc 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -71,20 +71,12 @@ static int xfrm6_get_tos(const struct flowi *fl)
return 0;
}
-static void xfrm6_init_dst(struct net *net, struct xfrm_dst *xdst)
-{
- struct rt6_info *rt = (struct rt6_info *)xdst;
-
- rt6_init_peer(rt, net->ipv6.peers);
-}
-
static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
int nfheader_len)
{
if (dst->ops->family == AF_INET6) {
struct rt6_info *rt = (struct rt6_info *)dst;
- if (rt->rt6i_node)
- path->path_cookie = rt->rt6i_node->fn_sernum;
+ path->path_cookie = rt6_get_cookie(rt);
}
path->u.rt6.rt6i_nfheader_len = nfheader_len;
@@ -106,16 +98,13 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
return -ENODEV;
}
- rt6_transfer_peer(&xdst->u.rt6, rt);
-
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
RTF_LOCAL);
xdst->u.rt6.rt6i_metric = rt->rt6i_metric;
xdst->u.rt6.rt6i_node = rt->rt6i_node;
- if (rt->rt6i_node)
- xdst->route_cookie = rt->rt6i_node->fn_sernum;
+ xdst->route_cookie = rt6_get_cookie(rt);
xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
xdst->u.rt6.rt6i_src = rt->rt6i_src;
@@ -255,10 +244,6 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt6.rt6i_idev))
in6_dev_put(xdst->u.rt6.rt6i_idev);
dst_destroy_metrics_generic(dst);
- if (rt6_has_peer(&xdst->u.rt6)) {
- struct inet_peer *peer = rt6_peer_ptr(&xdst->u.rt6);
- inet_putpeer(peer);
- }
xfrm_dst_destroy(xdst);
}
@@ -308,7 +293,6 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
.get_saddr = xfrm6_get_saddr,
.decode_session = _decode_session6,
.get_tos = xfrm6_get_tos,
- .init_dst = xfrm6_init_dst,
.init_path = xfrm6_init_path,
.fill_dst = xfrm6_fill_dst,
.blackhole_route = ip6_blackhole_route,
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 4ea5d7497b5f..48d0dc89b58d 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1347,7 +1347,7 @@ static int ipx_create(struct net *net, struct socket *sock, int protocol,
goto out;
rc = -ENOMEM;
- sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto);
+ sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto, kern);
if (!sk)
goto out;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index ee0ea25c8e7a..fae6822cc367 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1100,7 +1100,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol,
}
/* Allocate networking socket */
- sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto);
+ sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto, kern);
if (sk == NULL)
return -ENOMEM;
diff --git a/net/irda/timer.c b/net/irda/timer.c
index 0c4c115a5cab..f2280f73b057 100644
--- a/net/irda/timer.c
+++ b/net/irda/timer.c
@@ -60,8 +60,8 @@ void irlap_start_query_timer(struct irlap_cb *self, int S, int s)
* to avoid messing with for incoming connections requests and
* to accommodate devices that perform discovery slower than us.
* Jean II */
- timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s)
- + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT);
+ timeout = msecs_to_jiffies(sysctl_slot_timeout) * (S - s)
+ + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT;
/* Set or re-set the timer. We reset the timer for each received
* discovery query, which allows us to automatically adjust to
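The irda change above swaps an open-coded `sysctl_slot_timeout * HZ / 1000` for msecs_to_jiffies(). Beyond readability, the two differ at the margin: the open-coded division truncates, while msecs_to_jiffies() rounds up, so a sub-jiffy timeout can no longer collapse to zero. A minimal userspace sketch of the difference (the HZ value and helper names here are illustrative only):

  /* jiffies_demo.c - truncating vs. round-up ms-to-jiffies conversion */
  #include <stdio.h>

  #define HZ 100  /* assumed tick rate, for illustration */

  static unsigned long trunc_msecs(unsigned int ms)
  {
      return ms * HZ / 1000;           /* old open-coded form: truncates */
  }

  static unsigned long roundup_msecs(unsigned int ms)
  {
      return (ms * HZ + 999) / 1000;   /* what msecs_to_jiffies() computes,
                                        * minus its overflow clamping */
  }

  int main(void)
  {
      unsigned int ms = 15;

      printf("%u ms -> %lu jiffies truncated, %lu rounded up\n",
             ms, trunc_msecs(ms), roundup_msecs(ms));
      return 0;  /* prints: 15 ms -> 1 jiffies truncated, 2 rounded up */
  }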
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 6daa52a18d40..918151c11348 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -535,12 +535,12 @@ static void iucv_sock_init(struct sock *sk, struct sock *parent)
sk->sk_type = parent->sk_type;
}
-static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
+static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
struct sock *sk;
struct iucv_sock *iucv;
- sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
+ sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
if (!sk)
return NULL;
iucv = iucv_sk(sk);
@@ -602,7 +602,7 @@ static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
return -ESOCKTNOSUPPORT;
}
- sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
+ sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
if (!sk)
return -ENOMEM;
@@ -1723,7 +1723,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
}
/* Create the new socket */
- nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
+ nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
if (!nsk) {
err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
@@ -1933,7 +1933,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
goto out;
}
- nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
+ nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
bh_lock_sock(sk);
if ((sk->sk_state != IUCV_LISTEN) ||
sk_acceptq_is_full(sk) ||
diff --git a/net/key/af_key.c b/net/key/af_key.c
index f0d52d721b3a..9e834ec475a9 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -149,7 +149,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
return -EPROTONOSUPPORT;
err = -ENOMEM;
- sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto);
+ sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, kern);
if (sk == NULL)
goto out;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index a29a504492af..f6b090df3930 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1334,9 +1334,10 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
if (sock)
inet_shutdown(sock, 2);
} else {
- if (sock)
+ if (sock) {
kernel_sock_shutdown(sock, SHUT_RDWR);
- sk_release_kernel(sk);
+ sock_release(sock);
+ }
}
l2tp_tunnel_sock_put(sk);
@@ -1399,13 +1400,11 @@ static int l2tp_tunnel_sock_create(struct net *net,
if (cfg->local_ip6 && cfg->peer_ip6) {
struct sockaddr_l2tpip6 ip6_addr = {0};
- err = sock_create_kern(AF_INET6, SOCK_DGRAM,
+ err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
IPPROTO_L2TP, &sock);
if (err < 0)
goto out;
- sk_change_net(sock->sk, net);
-
ip6_addr.l2tp_family = AF_INET6;
memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
sizeof(ip6_addr.l2tp_addr));
@@ -1429,13 +1428,11 @@ static int l2tp_tunnel_sock_create(struct net *net,
{
struct sockaddr_l2tpip ip_addr = {0};
- err = sock_create_kern(AF_INET, SOCK_DGRAM,
+ err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
IPPROTO_L2TP, &sock);
if (err < 0)
goto out;
- sk_change_net(sock->sk, net);
-
ip_addr.l2tp_family = AF_INET;
ip_addr.l2tp_addr = cfg->local_ip;
ip_addr.l2tp_conn_id = tunnel_id;
@@ -1462,7 +1459,7 @@ out:
*sockp = sock;
if ((err < 0) && sock) {
kernel_sock_shutdown(sock, SHUT_RDWR);
- sk_release_kernel(sock->sk);
+ sock_release(sock);
*sockp = NULL;
}
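The l2tp conversion tracks the sock_create_kern() API change visible in these hunks: the function now takes the target namespace directly, so the old create-then-sk_change_net() dance and its special sk_release_kernel() teardown disappear, and a plain sock_release() suffices. A minimal module-style sketch of the new pairing (illustrative only; real users such as l2tp also bind the socket and attach encapsulation handlers):

  #include <linux/module.h>
  #include <linux/net.h>
  #include <linux/in.h>
  #include <net/net_namespace.h>
  #include <net/sock.h>

  static struct socket *demo_sock;

  static int __init demo_init(void)
  {
      /* socket is created directly in the desired netns */
      return sock_create_kern(&init_net, AF_INET, SOCK_DGRAM,
                              IPPROTO_UDP, &demo_sock);
  }

  static void __exit demo_exit(void)
  {
      /* shutdown plus plain sock_release(); no sk_change_net()/
       * sk_release_kernel() pairing anymore */
      kernel_sock_shutdown(demo_sock, SHUT_RDWR);
      sock_release(demo_sock);
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");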
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index e9b0dec56b8e..f56c9f69e9f2 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -542,12 +542,12 @@ static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb)
/* socket() handler. Initialize a new struct sock.
*/
-static int pppol2tp_create(struct net *net, struct socket *sock)
+static int pppol2tp_create(struct net *net, struct socket *sock, int kern)
{
int error = -ENOMEM;
struct sock *sk;
- sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
+ sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, kern);
if (!sk)
goto out;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 17a8dff06090..8fd9febaa5ba 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -168,7 +168,7 @@ static int llc_ui_create(struct net *net, struct socket *sock, int protocol,
if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) {
rc = -ENOMEM;
- sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto);
+ sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto, kern);
if (sk) {
rc = 0;
llc_ui_sk_init(sock, sk);
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 81a61fce3afb..3e821daf9dd4 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -768,7 +768,7 @@ static struct sock *llc_create_incoming_sock(struct sock *sk,
struct llc_addr *daddr)
{
struct sock *newsk = llc_sk_alloc(sock_net(sk), sk->sk_family, GFP_ATOMIC,
- sk->sk_prot);
+ sk->sk_prot, 0);
struct llc_sock *newllc, *llc = llc_sk(sk);
if (!newsk)
@@ -931,9 +931,9 @@ static void llc_sk_init(struct sock *sk)
* Allocates a LLC sock and initializes it. Returns the new LLC sock
* or %NULL if there's no memory available for one
*/
-struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot)
+struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern)
{
- struct sock *sk = sk_alloc(net, family, priority, prot);
+ struct sock *sk = sk_alloc(net, family, priority, prot, kern);
if (!sk)
goto out;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 64a012a0c6e5..086de496a4c1 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -302,6 +302,20 @@ config MAC80211_DEBUG_COUNTERS
---help---
Selecting this option causes mac80211 to keep additional
and very verbose statistics about TX and RX handler use
- and show them in debugfs.
+ as well as a few selected dot11 counters. These will be
+ exposed in debugfs.
+
+ Note that some of the counters are not concurrency safe
+ and may thus not always be accurate.
If unsure, say N.
+
+config MAC80211_STA_HASH_MAX_SIZE
+ int "Station hash table maximum size" if MAC80211_DEBUG_MENU
+ default 0
+ ---help---
+ Setting this option to a low value (e.g. 4) allows testing the
+ hash table with collisions relatively deterministically (just
+ connect more stations than the number selected here.)
+
+ If unsure, leave the default of 0.
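The new hash-size knob relies on the pigeonhole principle: with the table capped at N buckets, inserting more than N stations forces a collision, which makes the collision-handling paths testable on demand instead of by luck. A tiny userspace sketch of that idea with a toy modulus hash (mac80211's real table is an rhashtable; the bucketing below is illustrative):

  /* collide_demo.c - a 5th key cannot fit 4 buckets without colliding */
  #include <stdio.h>

  #define TABLE_SIZE 4  /* plays the role of MAC80211_STA_HASH_MAX_SIZE=4 */

  int main(void)
  {
      int used[TABLE_SIZE] = { 0 };

      for (unsigned int key = 0; key <= TABLE_SIZE; key++) {
          unsigned int bucket = (key * 7 + 3) % TABLE_SIZE; /* toy hash */

          if (used[bucket]++)
              printf("key %u collides in bucket %u\n", key, bucket);
      }
      return 0;
  }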
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index cce9d425c718..c8ba2e77737c 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -564,8 +564,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
if ((tid >= IEEE80211_NUM_TIDS) ||
- !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
- (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
+ !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) ||
+ ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
return -EINVAL;
ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index ff347a0eebd4..bf7023f6c327 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2,7 +2,7 @@
* mac80211 configuration hooks for cfg80211
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2013-2015 Intel Mobile Communications GmbH
*
* This file is GPLv2 as found in COPYING.
*/
@@ -137,6 +137,9 @@ static int ieee80211_set_noack_map(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
sdata->noack_map = noack_map;
+
+ ieee80211_check_fast_xmit_iface(sdata);
+
return 0;
}
@@ -309,6 +312,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
u32 iv32;
u16 iv16;
int err = -ENOENT;
+ struct ieee80211_key_seq kseq = {};
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -339,10 +343,12 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
iv32 = key->u.tkip.tx.iv32;
iv16 = key->u.tkip.tx.iv16;
- if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
- drv_get_tkip_seq(sdata->local,
- key->conf.hw_key_idx,
- &iv32, &iv16);
+ if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
+ !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+ drv_get_key_seq(sdata->local, key, &kseq);
+ iv32 = kseq.tkip.iv32;
+ iv16 = kseq.tkip.iv16;
+ }
seq[0] = iv16 & 0xff;
seq[1] = (iv16 >> 8) & 0xff;
@@ -355,52 +361,44 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
- pn64 = atomic64_read(&key->u.ccmp.tx_pn);
- seq[0] = pn64;
- seq[1] = pn64 >> 8;
- seq[2] = pn64 >> 16;
- seq[3] = pn64 >> 24;
- seq[4] = pn64 >> 32;
- seq[5] = pn64 >> 40;
- params.seq = seq;
- params.seq_len = 6;
- break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
- seq[0] = pn64;
- seq[1] = pn64 >> 8;
- seq[2] = pn64 >> 16;
- seq[3] = pn64 >> 24;
- seq[4] = pn64 >> 32;
- seq[5] = pn64 >> 40;
- params.seq = seq;
- params.seq_len = 6;
- break;
+ BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
+ offsetof(typeof(kseq), aes_cmac));
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
- seq[0] = pn64;
- seq[1] = pn64 >> 8;
- seq[2] = pn64 >> 16;
- seq[3] = pn64 >> 24;
- seq[4] = pn64 >> 32;
- seq[5] = pn64 >> 40;
- params.seq = seq;
- params.seq_len = 6;
- break;
+ BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
+ offsetof(typeof(kseq), aes_gmac));
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- pn64 = atomic64_read(&key->u.gcmp.tx_pn);
- seq[0] = pn64;
- seq[1] = pn64 >> 8;
- seq[2] = pn64 >> 16;
- seq[3] = pn64 >> 24;
- seq[4] = pn64 >> 32;
- seq[5] = pn64 >> 40;
+ BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
+ offsetof(typeof(kseq), gcmp));
+
+ if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
+ !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+ drv_get_key_seq(sdata->local, key, &kseq);
+ memcpy(seq, kseq.ccmp.pn, 6);
+ } else {
+ pn64 = atomic64_read(&key->conf.tx_pn);
+ seq[0] = pn64;
+ seq[1] = pn64 >> 8;
+ seq[2] = pn64 >> 16;
+ seq[3] = pn64 >> 24;
+ seq[4] = pn64 >> 32;
+ seq[5] = pn64 >> 40;
+ }
params.seq = seq;
params.seq_len = 6;
break;
+ default:
+ if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
+ break;
+ if (WARN_ON(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
+ break;
+ drv_get_key_seq(sdata->local, key, &kseq);
+ params.seq = kseq.hw.seq;
+ params.seq_len = kseq.hw.seq_len;
+ break;
}
params.key = key->conf.key;
@@ -1372,6 +1370,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
}
sta->sdata = vlansdata;
+ ieee80211_check_fast_xmit(sta);
if (sta->sta_state == IEEE80211_STA_AUTHORIZED &&
prev_4addr != new_4addr) {
@@ -1764,7 +1763,7 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
/* our RSSI threshold implementation is supported only for
* devices that report signal in dBm.
*/
- if (!(sdata->local->hw.flags & IEEE80211_HW_SIGNAL_DBM))
+ if (!ieee80211_hw_check(&sdata->local->hw, SIGNAL_DBM))
return -ENOTSUPP;
conf->rssi_threshold = nconf->rssi_threshold;
}
@@ -2099,10 +2098,14 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
int err;
if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
+ ieee80211_check_fast_xmit_all(local);
+
err = drv_set_frag_threshold(local, wiphy->frag_threshold);
- if (err)
+ if (err) {
+ ieee80211_check_fast_xmit_all(local);
return err;
+ }
}
if ((changed & WIPHY_PARAM_COVERAGE_CLASS) ||
@@ -2404,7 +2407,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
- if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
+ if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
return -EOPNOTSUPP;
if (enabled == sdata->u.mgd.powersave &&
@@ -2419,7 +2422,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
__ieee80211_request_smps_mgd(sdata, sdata->u.mgd.req_smps);
sdata_unlock(sdata);
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
+ if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
ieee80211_recalc_ps(local, -1);
@@ -2463,7 +2466,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
if (!ieee80211_sdata_running(sdata))
return -ENETDOWN;
- if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
+ if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
ret = drv_set_bitrate_mask(local, sdata, mask);
if (ret)
return ret;
@@ -2514,6 +2517,19 @@ static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local,
return true;
}
+static u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local)
+{
+ lockdep_assert_held(&local->mtx);
+
+ local->roc_cookie_counter++;
+
+ /* wow, you wrapped 64 bits ... more likely a bug */
+ if (WARN_ON(local->roc_cookie_counter == 0))
+ local->roc_cookie_counter++;
+
+ return local->roc_cookie_counter;
+}
+
static int ieee80211_start_roc_work(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *channel,
@@ -2551,7 +2567,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
roc->req_duration = duration;
roc->frame = txskb;
roc->type = type;
- roc->mgmt_tx_cookie = (unsigned long)txskb;
roc->sdata = sdata;
INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
INIT_LIST_HEAD(&roc->dependents);
@@ -2561,17 +2576,10 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
* or the SKB (for mgmt TX)
*/
if (!txskb) {
- /* local->mtx protects this */
- local->roc_cookie_counter++;
- roc->cookie = local->roc_cookie_counter;
- /* wow, you wrapped 64 bits ... more likely a bug */
- if (WARN_ON(roc->cookie == 0)) {
- roc->cookie = 1;
- local->roc_cookie_counter++;
- }
+ roc->cookie = ieee80211_mgmt_tx_cookie(local);
*cookie = roc->cookie;
} else {
- *cookie = (unsigned long)txskb;
+ roc->mgmt_tx_cookie = *cookie;
}
/* if there's one pending or we're scanning, queue this one */
@@ -3244,13 +3252,43 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
return err;
}
+static struct sk_buff *ieee80211_make_ack_skb(struct ieee80211_local *local,
+ struct sk_buff *skb, u64 *cookie,
+ gfp_t gfp)
+{
+ unsigned long spin_flags;
+ struct sk_buff *ack_skb;
+ int id;
+
+ ack_skb = skb_copy(skb, gfp);
+ if (!ack_skb)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_irqsave(&local->ack_status_lock, spin_flags);
+ id = idr_alloc(&local->ack_status_frames, ack_skb,
+ 1, 0x10000, GFP_ATOMIC);
+ spin_unlock_irqrestore(&local->ack_status_lock, spin_flags);
+
+ if (id < 0) {
+ kfree_skb(ack_skb);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ IEEE80211_SKB_CB(skb)->ack_frame_id = id;
+
+ *cookie = ieee80211_mgmt_tx_cookie(local);
+ IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie;
+
+ return ack_skb;
+}
+
static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params,
u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb;
+ struct sk_buff *skb, *ack_skb;
struct sta_info *sta;
const struct ieee80211_mgmt *mgmt = (void *)params->buf;
bool need_offchan = false;
@@ -3299,8 +3337,14 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
- if (!sdata->u.mgd.associated)
+ sdata_lock(sdata);
+ if (!sdata->u.mgd.associated ||
+ (params->offchan && params->wait &&
+ local->ops->remain_on_channel &&
+ memcmp(sdata->u.mgd.associated->bssid,
+ mgmt->bssid, ETH_ALEN)))
need_offchan = true;
+ sdata_unlock(sdata);
break;
case NL80211_IFTYPE_P2P_DEVICE:
need_offchan = true;
@@ -3356,6 +3400,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
/* Update CSA counters */
if (sdata->vif.csa_active &&
(sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
params->n_csa_offsets) {
int i;
@@ -3382,8 +3427,23 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
skb->dev = sdata->dev;
+ if (!params->dont_wait_for_ack) {
+ /* make a copy to preserve the frame contents
+ * in case of encryption.
+ */
+ ack_skb = ieee80211_make_ack_skb(local, skb, cookie,
+ GFP_KERNEL);
+ if (IS_ERR(ack_skb)) {
+ ret = PTR_ERR(ack_skb);
+ kfree_skb(skb);
+ goto out_unlock;
+ }
+ } else {
+ /* for cookie below */
+ ack_skb = skb;
+ }
+
if (!need_offchan) {
- *cookie = (unsigned long) skb;
ieee80211_tx_skb(sdata, skb);
ret = 0;
goto out_unlock;
@@ -3391,7 +3451,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
- if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+ if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
IEEE80211_SKB_CB(skb)->hw_queue =
local->hw.offchannel_tx_hw_queue;
@@ -3476,7 +3536,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct ieee80211_qos_hdr *nullfunc;
- struct sk_buff *skb;
+ struct sk_buff *skb, *ack_skb;
int size = sizeof(*nullfunc);
__le16 fc;
bool qos;
@@ -3484,20 +3544,24 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
struct sta_info *sta;
struct ieee80211_chanctx_conf *chanctx_conf;
enum ieee80211_band band;
+ int ret;
+
+ /* the lock is needed to assign the cookie later */
+ mutex_lock(&local->mtx);
rcu_read_lock();
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
- rcu_read_unlock();
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
band = chanctx_conf->def.chan->band;
sta = sta_info_get_bss(sdata, peer);
if (sta) {
qos = sta->sta.wme;
} else {
- rcu_read_unlock();
- return -ENOLINK;
+ ret = -ENOLINK;
+ goto unlock;
}
if (qos) {
@@ -3513,8 +3577,8 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
if (!skb) {
- rcu_read_unlock();
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unlock;
}
skb->dev = dev;
@@ -3540,13 +3604,23 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
if (qos)
nullfunc->qos_ctrl = cpu_to_le16(7);
+ ack_skb = ieee80211_make_ack_skb(local, skb, cookie, GFP_ATOMIC);
+ if (IS_ERR(ack_skb)) {
+ kfree_skb(skb);
+ ret = PTR_ERR(ack_skb);
+ goto unlock;
+ }
+
local_bh_disable();
ieee80211_xmit(sdata, sta, skb);
local_bh_enable();
+
+ ret = 0;
+unlock:
rcu_read_unlock();
+ mutex_unlock(&local->mtx);
- *cookie = (unsigned long) skb;
- return 0;
+ return ret;
}
static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
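Two small patterns in the cfg.c hunks above are worth calling out. ieee80211_mgmt_tx_cookie() replaces per-call-site cookie code with one counter, bumped under local->mtx, that skips zero on 64-bit wrap so a zero cookie can keep meaning "none"; ieee80211_make_ack_skb() then pairs that cookie with an idr_alloc()'d frame copy for later TX-status matching. A minimal sketch of the wrap-skipping counter alone (serialization is left to the caller, as local->mtx provides in mac80211):

  #include <stdint.h>
  #include <stdio.h>

  /* caller must serialize calls, as mac80211 does with local->mtx */
  static uint64_t next_cookie(uint64_t *counter)
  {
      (*counter)++;
      if (*counter == 0)   /* wrapping 64 bits is more likely a bug... */
          (*counter)++;    /* ...but never hand out cookie 0 */
      return *counter;
  }

  int main(void)
  {
      uint64_t c = UINT64_MAX - 1;

      printf("%llu\n", (unsigned long long)next_cookie(&c)); /* UINT64_MAX */
      printf("%llu\n", (unsigned long long)next_cookie(&c)); /* 1, not 0 */
      return 0;
  }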
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5bcd4e5589d3..f01c18a3160e 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -664,6 +664,8 @@ out:
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_IDLE);
+ ieee80211_check_fast_xmit_iface(sdata);
+
return ret;
}
@@ -1008,6 +1010,8 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
if (WARN_ON(!chandef))
return -EINVAL;
+ ieee80211_change_chanctx(local, new_ctx, chandef);
+
vif_chsw[0].vif = &sdata->vif;
vif_chsw[0].old_ctx = &old_ctx->conf;
vif_chsw[0].new_ctx = &new_ctx->conf;
@@ -1030,6 +1034,8 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
if (sdata->vif.type == NL80211_IFTYPE_AP)
__ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
+ ieee80211_check_fast_xmit_iface(sdata);
+
if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
ieee80211_free_chanctx(local, old_ctx);
@@ -1079,6 +1085,8 @@ ieee80211_vif_use_reserved_assign(struct ieee80211_sub_if_data *sdata)
if (WARN_ON(!chandef))
return -EINVAL;
+ ieee80211_change_chanctx(local, new_ctx, chandef);
+
list_del(&sdata->reserved_chanctx_list);
sdata->reserved_chanctx = NULL;
@@ -1376,6 +1384,8 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
__ieee80211_vif_copy_chanctx_to_vlans(sdata,
false);
+ ieee80211_check_fast_xmit_iface(sdata);
+
sdata->radar_required = sdata->reserved_radar_required;
if (sdata->vif.bss_conf.chandef.width !=
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 23813ebb349c..3ea8b7de9633 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -1,4 +1,3 @@
-
/*
* mac80211 debugfs for wireless PHYs
*
@@ -92,62 +91,66 @@ static const struct file_operations reset_ops = {
};
#endif
+static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = {
+#define FLAG(F) [IEEE80211_HW_##F] = #F
+ FLAG(HAS_RATE_CONTROL),
+ FLAG(RX_INCLUDES_FCS),
+ FLAG(HOST_BROADCAST_PS_BUFFERING),
+ FLAG(SIGNAL_UNSPEC),
+ FLAG(SIGNAL_DBM),
+ FLAG(NEED_DTIM_BEFORE_ASSOC),
+ FLAG(SPECTRUM_MGMT),
+ FLAG(AMPDU_AGGREGATION),
+ FLAG(SUPPORTS_PS),
+ FLAG(PS_NULLFUNC_STACK),
+ FLAG(SUPPORTS_DYNAMIC_PS),
+ FLAG(MFP_CAPABLE),
+ FLAG(WANT_MONITOR_VIF),
+ FLAG(NO_AUTO_VIF),
+ FLAG(SW_CRYPTO_CONTROL),
+ FLAG(SUPPORT_FAST_XMIT),
+ FLAG(REPORTS_TX_ACK_STATUS),
+ FLAG(CONNECTION_MONITOR),
+ FLAG(QUEUE_CONTROL),
+ FLAG(SUPPORTS_PER_STA_GTK),
+ FLAG(AP_LINK_PS),
+ FLAG(TX_AMPDU_SETUP_IN_HW),
+ FLAG(SUPPORTS_RC_TABLE),
+ FLAG(P2P_DEV_ADDR_FOR_INTF),
+ FLAG(TIMING_BEACON_ONLY),
+ FLAG(SUPPORTS_HT_CCK_RATES),
+ FLAG(CHANCTX_STA_CSA),
+ FLAG(SUPPORTS_CLONED_SKBS),
+ FLAG(SINGLE_SCAN_ON_ALL_BANDS),
+
+ /* keep last for the build bug below */
+ (void *)0x1
+#undef FLAG
+};
+
static ssize_t hwflags_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
- int mxln = 500;
+ size_t bufsz = 30 * NUM_IEEE80211_HW_FLAGS;
+ char *buf = kzalloc(bufsz, GFP_KERNEL);
+ char *pos = buf, *end = buf + bufsz - 1;
ssize_t rv;
- char *buf = kzalloc(mxln, GFP_KERNEL);
- int sf = 0; /* how many written so far */
+ int i;
if (!buf)
- return 0;
-
- sf += scnprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
- if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
- sf += scnprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
- if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
- sf += scnprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
- if (local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)
- sf += scnprintf(buf + sf, mxln - sf,
- "HOST_BCAST_PS_BUFFERING\n");
- if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)
- sf += scnprintf(buf + sf, mxln - sf,
- "2GHZ_SHORT_SLOT_INCAPABLE\n");
- if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)
- sf += scnprintf(buf + sf, mxln - sf,
- "2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
- if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
- sf += scnprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
- if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
- sf += scnprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
- if (local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC)
- sf += scnprintf(buf + sf, mxln - sf,
- "NEED_DTIM_BEFORE_ASSOC\n");
- if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
- sf += scnprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
- if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
- sf += scnprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_PS)
- sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
- if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
- sf += scnprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
- sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
- if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
- sf += scnprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
- if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
- sf += scnprintf(buf + sf, mxln - sf,
- "REPORTS_TX_ACK_STATUS\n");
- if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
- sf += scnprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
- sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
- if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
- sf += scnprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
- if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
- sf += scnprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
+ return -ENOMEM;
+
+ /* fail compilation if somebody adds or removes
+ * a flag without updating the name array above
+ */
+ BUILD_BUG_ON(hw_flag_names[NUM_IEEE80211_HW_FLAGS] != (void *)0x1);
+
+ for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) {
+ if (test_bit(i, local->hw.flags))
+ pos += scnprintf(pos, end - pos, "%s\n",
+ hw_flag_names[i]);
+ }
rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
kfree(buf);
@@ -219,8 +222,8 @@ static const struct file_operations stats_ ##name## _ops = { \
.llseek = generic_file_llseek, \
};
-#define DEBUGFS_STATS_ADD(name, field) \
- debugfs_create_u32(#name, 0400, statsd, (u32 *) &field);
+#define DEBUGFS_STATS_ADD(name) \
+ debugfs_create_u32(#name, 0400, statsd, &local->name);
#define DEBUGFS_DEVSTATS_ADD(name) \
debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops);
@@ -255,53 +258,31 @@ void debugfs_hw_add(struct ieee80211_local *local)
if (!statsd)
return;
- DEBUGFS_STATS_ADD(transmitted_fragment_count,
- local->dot11TransmittedFragmentCount);
- DEBUGFS_STATS_ADD(multicast_transmitted_frame_count,
- local->dot11MulticastTransmittedFrameCount);
- DEBUGFS_STATS_ADD(failed_count, local->dot11FailedCount);
- DEBUGFS_STATS_ADD(retry_count, local->dot11RetryCount);
- DEBUGFS_STATS_ADD(multiple_retry_count,
- local->dot11MultipleRetryCount);
- DEBUGFS_STATS_ADD(frame_duplicate_count,
- local->dot11FrameDuplicateCount);
- DEBUGFS_STATS_ADD(received_fragment_count,
- local->dot11ReceivedFragmentCount);
- DEBUGFS_STATS_ADD(multicast_received_frame_count,
- local->dot11MulticastReceivedFrameCount);
- DEBUGFS_STATS_ADD(transmitted_frame_count,
- local->dot11TransmittedFrameCount);
#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
- DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop);
- DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued);
- DEBUGFS_STATS_ADD(tx_handlers_drop_fragment,
- local->tx_handlers_drop_fragment);
- DEBUGFS_STATS_ADD(tx_handlers_drop_wep,
- local->tx_handlers_drop_wep);
- DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc,
- local->tx_handlers_drop_not_assoc);
- DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port,
- local->tx_handlers_drop_unauth_port);
- DEBUGFS_STATS_ADD(rx_handlers_drop, local->rx_handlers_drop);
- DEBUGFS_STATS_ADD(rx_handlers_queued, local->rx_handlers_queued);
- DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc,
- local->rx_handlers_drop_nullfunc);
- DEBUGFS_STATS_ADD(rx_handlers_drop_defrag,
- local->rx_handlers_drop_defrag);
- DEBUGFS_STATS_ADD(rx_handlers_drop_short,
- local->rx_handlers_drop_short);
- DEBUGFS_STATS_ADD(tx_expand_skb_head,
- local->tx_expand_skb_head);
- DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned,
- local->tx_expand_skb_head_cloned);
- DEBUGFS_STATS_ADD(rx_expand_skb_head,
- local->rx_expand_skb_head);
- DEBUGFS_STATS_ADD(rx_expand_skb_head2,
- local->rx_expand_skb_head2);
- DEBUGFS_STATS_ADD(rx_handlers_fragments,
- local->rx_handlers_fragments);
- DEBUGFS_STATS_ADD(tx_status_drop,
- local->tx_status_drop);
+ DEBUGFS_STATS_ADD(dot11TransmittedFragmentCount);
+ DEBUGFS_STATS_ADD(dot11MulticastTransmittedFrameCount);
+ DEBUGFS_STATS_ADD(dot11FailedCount);
+ DEBUGFS_STATS_ADD(dot11RetryCount);
+ DEBUGFS_STATS_ADD(dot11MultipleRetryCount);
+ DEBUGFS_STATS_ADD(dot11FrameDuplicateCount);
+ DEBUGFS_STATS_ADD(dot11ReceivedFragmentCount);
+ DEBUGFS_STATS_ADD(dot11MulticastReceivedFrameCount);
+ DEBUGFS_STATS_ADD(dot11TransmittedFrameCount);
+ DEBUGFS_STATS_ADD(tx_handlers_drop);
+ DEBUGFS_STATS_ADD(tx_handlers_queued);
+ DEBUGFS_STATS_ADD(tx_handlers_drop_wep);
+ DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc);
+ DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port);
+ DEBUGFS_STATS_ADD(rx_handlers_drop);
+ DEBUGFS_STATS_ADD(rx_handlers_queued);
+ DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc);
+ DEBUGFS_STATS_ADD(rx_handlers_drop_defrag);
+ DEBUGFS_STATS_ADD(rx_handlers_drop_short);
+ DEBUGFS_STATS_ADD(tx_expand_skb_head);
+ DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned);
+ DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag);
+ DEBUGFS_STATS_ADD(rx_handlers_fragments);
+ DEBUGFS_STATS_ADD(tx_status_drop);
#endif
DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount);
DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount);
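The rewritten hwflags_read() leans on a build-time consistency check: hw_flag_names[] is sized NUM_IEEE80211_HW_FLAGS + 1, a dummy non-NULL sentinel occupies the last slot, and BUILD_BUG_ON() verifies the sentinel is still at index NUM, so adding a flag without naming it breaks the build. A standalone sketch of the same enum/name-table coupling (a static_assert on the array length stands in for the kernel's sentinel-plus-BUILD_BUG_ON variant):

  /* names_demo.c - keep an enum and its name table in sync at build time */
  #include <assert.h>
  #include <stdio.h>

  enum demo_flag { FLAG_A, FLAG_B, FLAG_C, NUM_DEMO_FLAGS };

  static const char *demo_flag_names[] = {
  #define FLAG(F) [FLAG_##F] = #F
      FLAG(A),
      FLAG(B),
      FLAG(C),
  #undef FLAG
  };

  /* adding an enum value without a name entry breaks compilation here */
  static_assert(sizeof(demo_flag_names) / sizeof(demo_flag_names[0]) ==
                NUM_DEMO_FLAGS, "demo_flag_names[] out of sync");

  int main(void)
  {
      for (int i = 0; i < NUM_DEMO_FLAGS; i++)
          printf("%s\n", demo_flag_names[i]);
      return 0;
  }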
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 71ac1b5f4da5..e82bf1e9d7a8 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -95,28 +95,13 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
- pn = atomic64_read(&key->u.ccmp.tx_pn);
- len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
- (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
- (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
- break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- pn = atomic64_read(&key->u.aes_cmac.tx_pn);
- len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
- (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
- (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
- break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- pn = atomic64_read(&key->u.aes_gmac.tx_pn);
- len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
- (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
- (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
- break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- pn = atomic64_read(&key->u.gcmp.tx_pn);
+ pn = atomic64_read(&key->conf.tx_pn);
len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
(u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
(u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 252859e90e8a..06d52935036d 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -29,8 +29,6 @@ static ssize_t sta_ ##name## _read(struct file *file, \
format_string, sta->field); \
}
#define STA_READ_D(name, field) STA_READ(name, field, "%d\n")
-#define STA_READ_U(name, field) STA_READ(name, field, "%u\n")
-#define STA_READ_S(name, field) STA_READ(name, field, "%s\n")
#define STA_OPS(name) \
static const struct file_operations sta_ ##name## _ops = { \
@@ -52,10 +50,7 @@ static const struct file_operations sta_ ##name## _ops = { \
STA_OPS(name)
STA_FILE(aid, sta.aid, D);
-STA_FILE(dev, sdata->name, S);
-STA_FILE(last_signal, last_signal, D);
STA_FILE(last_ack_signal, last_ack_signal, D);
-STA_FILE(beacon_loss_count, beacon_loss_count, D);
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
@@ -101,40 +96,6 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
}
STA_OPS(num_ps_buf_frames);
-static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct sta_info *sta = file->private_data;
- return mac80211_format_buffer(userbuf, count, ppos, "%d\n",
- jiffies_to_msecs(jiffies - sta->last_rx));
-}
-STA_OPS(inactive_ms);
-
-
-static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct sta_info *sta = file->private_data;
- struct timespec uptime;
- struct tm result;
- long connected_time_secs;
- char buf[100];
- int res;
- ktime_get_ts(&uptime);
- connected_time_secs = uptime.tv_sec - sta->last_connected;
- time_to_tm(connected_time_secs, 0, &result);
- result.tm_year -= 70;
- result.tm_mday -= 1;
- res = scnprintf(buf, sizeof(buf),
- "years - %ld\nmonths - %d\ndays - %d\nclock - %d:%d:%d\n\n",
- result.tm_year, result.tm_mon, result.tm_mday,
- result.tm_hour, result.tm_min, result.tm_sec);
- return simple_read_from_buffer(userbuf, count, ppos, buf, res);
-}
-STA_OPS(connected_time);
-
-
-
static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
@@ -359,37 +320,6 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
}
STA_OPS(vht_capa);
-static ssize_t sta_current_tx_rate_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct sta_info *sta = file->private_data;
- struct rate_info rinfo;
- u16 rate;
- sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
- rate = cfg80211_calculate_bitrate(&rinfo);
-
- return mac80211_format_buffer(userbuf, count, ppos,
- "%d.%d MBit/s\n",
- rate/10, rate%10);
-}
-STA_OPS(current_tx_rate);
-
-static ssize_t sta_last_rx_rate_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- struct sta_info *sta = file->private_data;
- struct rate_info rinfo;
- u16 rate;
-
- sta_set_rate_info_rx(sta, &rinfo);
-
- rate = cfg80211_calculate_bitrate(&rinfo);
-
- return mac80211_format_buffer(userbuf, count, ppos,
- "%d.%d MBit/s\n",
- rate/10, rate%10);
-}
-STA_OPS(last_rx_rate);
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, \
@@ -432,30 +362,15 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(flags);
DEBUGFS_ADD(num_ps_buf_frames);
- DEBUGFS_ADD(inactive_ms);
- DEBUGFS_ADD(connected_time);
DEBUGFS_ADD(last_seq_ctrl);
DEBUGFS_ADD(agg_status);
- DEBUGFS_ADD(dev);
- DEBUGFS_ADD(last_signal);
- DEBUGFS_ADD(beacon_loss_count);
DEBUGFS_ADD(ht_capa);
DEBUGFS_ADD(vht_capa);
DEBUGFS_ADD(last_ack_signal);
- DEBUGFS_ADD(current_tx_rate);
- DEBUGFS_ADD(last_rx_rate);
- DEBUGFS_ADD_COUNTER(rx_packets, rx_packets);
- DEBUGFS_ADD_COUNTER(tx_packets, tx_packets);
- DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes);
- DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes);
DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates);
DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments);
- DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped);
- DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments);
DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count);
- DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed);
- DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
debugfs_create_x32("driver_buffered_tids", 0400,
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 26e1ca8a474a..32a2e707e222 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -146,7 +146,7 @@ static inline int drv_add_interface(struct ieee80211_local *local,
if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
(sdata->vif.type == NL80211_IFTYPE_MONITOR &&
- !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF) &&
+ !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))))
return -EINVAL;
@@ -417,12 +417,13 @@ static inline int drv_get_stats(struct ieee80211_local *local,
return ret;
}
-static inline void drv_get_tkip_seq(struct ieee80211_local *local,
- u8 hw_key_idx, u32 *iv32, u16 *iv16)
+static inline void drv_get_key_seq(struct ieee80211_local *local,
+ struct ieee80211_key *key,
+ struct ieee80211_key_seq *seq)
{
- if (local->ops->get_tkip_seq)
- local->ops->get_tkip_seq(&local->hw, hw_key_idx, iv32, iv16);
- trace_drv_get_tkip_seq(local, hw_key_idx, iv32, iv16);
+ if (local->ops->get_key_seq)
+ local->ops->get_key_seq(&local->hw, &key->conf, seq);
+ trace_drv_get_key_seq(local, &key->conf);
}
static inline int drv_set_frag_threshold(struct ieee80211_local *local,
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 52bcea6ad9e8..188faab11c24 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -38,7 +38,7 @@ static void ieee80211_get_ringparam(struct net_device *dev,
static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "rx_bytes",
"rx_duplicates", "rx_fragments", "rx_dropped",
- "tx_packets", "tx_bytes", "tx_fragments",
+ "tx_packets", "tx_bytes",
"tx_filtered", "tx_retry_failed", "tx_retries",
"beacon_loss", "sta_state", "txrate", "rxrate", "signal",
"channel", "noise", "ch_time", "ch_time_busy",
@@ -87,7 +87,6 @@ static void ieee80211_get_stats(struct net_device *dev,
\
data[i++] += sinfo.tx_packets; \
data[i++] += sinfo.tx_bytes; \
- data[i++] += sta->tx_fragments; \
data[i++] += sta->tx_filtered_count; \
data[i++] += sta->tx_retry_failed; \
data[i++] += sta->tx_retry_count; \
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index bfef1b215050..7f72bc9bae2e 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -146,6 +146,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
csa_settings->chandef.chan->center_freq);
presp->csa_counter_offsets[0] = (pos - presp->head);
*pos++ = csa_settings->count;
+ presp->csa_current_counter = csa_settings->count;
}
/* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
@@ -1031,8 +1032,11 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
}
}
- if (sta && elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS)
+ if (sta && !sta->sta.wme &&
+ elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS) {
sta->sta.wme = true;
+ ieee80211_check_fast_xmit(sta);
+ }
if (sta && elems->ht_operation && elems->ht_cap_elem &&
sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c0a9187bc3a9..b12f61507f9f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -181,8 +181,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
/**
* enum ieee80211_packet_rx_flags - packet RX flags
- * @IEEE80211_RX_RA_MATCH: frame is destined to interface currently processed
- * (incl. multicast frames)
* @IEEE80211_RX_FRAGMENTED: fragmented frame
* @IEEE80211_RX_AMSDU: a-MSDU packet
* @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
@@ -192,7 +190,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
* @rx_flags field of &struct ieee80211_rx_status.
*/
enum ieee80211_packet_rx_flags {
- IEEE80211_RX_RA_MATCH = BIT(1),
IEEE80211_RX_FRAGMENTED = BIT(2),
IEEE80211_RX_AMSDU = BIT(3),
IEEE80211_RX_MALFORMED_ACTION_FRM = BIT(4),
@@ -722,7 +719,6 @@ struct ieee80211_if_mesh {
* enum ieee80211_sub_if_data_flags - virtual interface flags
*
* @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets
- * @IEEE80211_SDATA_PROMISC: interface is promisc
* @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode
* @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between
* associated stations and deliver multicast frames both
@@ -732,7 +728,6 @@ struct ieee80211_if_mesh {
*/
enum ieee80211_sub_if_data_flags {
IEEE80211_SDATA_ALLMULTI = BIT(0),
- IEEE80211_SDATA_PROMISC = BIT(1),
IEEE80211_SDATA_OPERATING_GMODE = BIT(2),
IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4),
@@ -1040,7 +1035,6 @@ enum queue_stop_reason {
#ifdef CONFIG_MAC80211_LEDS
struct tpt_led_trigger {
- struct led_trigger trig;
char name[32];
const struct ieee80211_tpt_blink *blink_table;
unsigned int blink_table_len;
@@ -1208,8 +1202,8 @@ struct ieee80211_local {
atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES];
- /* number of interfaces with corresponding IFF_ flags */
- atomic_t iff_allmultis, iff_promiscs;
+ /* number of interfaces with allmulti RX */
+ atomic_t iff_allmultis;
struct rate_control_ref *rate_ctrl;
@@ -1261,6 +1255,15 @@ struct ieee80211_local {
struct list_head chanctx_list;
struct mutex chanctx_mtx;
+#ifdef CONFIG_MAC80211_LEDS
+ struct led_trigger tx_led, rx_led, assoc_led, radio_led;
+ struct led_trigger tpt_led;
+ atomic_t tx_led_active, rx_led_active, assoc_led_active;
+ atomic_t radio_led_active, tpt_led_active;
+ struct tpt_led_trigger *tpt_led_trigger;
+#endif
+
+#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
/* SNMP counters */
/* dot11CountersTable */
u32 dot11TransmittedFragmentCount;
@@ -1273,18 +1276,9 @@ struct ieee80211_local {
u32 dot11MulticastReceivedFrameCount;
u32 dot11TransmittedFrameCount;
-#ifdef CONFIG_MAC80211_LEDS
- struct led_trigger *tx_led, *rx_led, *assoc_led, *radio_led;
- struct tpt_led_trigger *tpt_led_trigger;
- char tx_led_name[32], rx_led_name[32],
- assoc_led_name[32], radio_led_name[32];
-#endif
-
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
/* TX/RX handler statistics */
unsigned int tx_handlers_drop;
unsigned int tx_handlers_queued;
- unsigned int tx_handlers_drop_fragment;
unsigned int tx_handlers_drop_wep;
unsigned int tx_handlers_drop_not_assoc;
unsigned int tx_handlers_drop_unauth_port;
@@ -1295,8 +1289,7 @@ struct ieee80211_local {
unsigned int rx_handlers_drop_short;
unsigned int tx_expand_skb_head;
unsigned int tx_expand_skb_head_cloned;
- unsigned int rx_expand_skb_head;
- unsigned int rx_expand_skb_head2;
+ unsigned int rx_expand_skb_head_defrag;
unsigned int rx_handlers_fragments;
unsigned int tx_status_drop;
#define I802_DEBUG_INC(c) (c)++
@@ -1648,6 +1641,11 @@ struct sk_buff *
ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u32 info_flags);
+void ieee80211_check_fast_xmit(struct sta_info *sta);
+void ieee80211_check_fast_xmit_all(struct ieee80211_local *local);
+void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata);
+void ieee80211_clear_fast_xmit(struct sta_info *sta);
+
/* HT */
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_ht_cap *ht_cap);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 84cef600c573..ed1edac14372 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -338,7 +338,7 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
if ((iftype != NL80211_IFTYPE_AP &&
iftype != NL80211_IFTYPE_P2P_GO &&
iftype != NL80211_IFTYPE_MESH_POINT) ||
- !(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) {
+ !ieee80211_hw_check(&sdata->local->hw, QUEUE_CONTROL)) {
sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
return 0;
}
@@ -378,7 +378,7 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+ if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
else if (local->hw.queues >= IEEE80211_NUM_ACS)
sdata->vif.hw_queue[i] = i;
@@ -393,7 +393,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
struct ieee80211_sub_if_data *sdata;
int ret;
- if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
return 0;
ASSERT_RTNL();
@@ -454,7 +454,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
return;
ASSERT_RTNL();
@@ -703,9 +703,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
atomic_inc(&local->iff_allmultis);
- if (sdata->flags & IEEE80211_SDATA_PROMISC)
- atomic_inc(&local->iff_promiscs);
-
if (coming_up)
local->open_count++;
@@ -835,13 +832,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
(sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1)));
- /* don't count this interface for promisc/allmulti while it is down */
+ /* don't count this interface for allmulti while it is down */
if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
atomic_dec(&local->iff_allmultis);
- if (sdata->flags & IEEE80211_SDATA_PROMISC)
- atomic_dec(&local->iff_promiscs);
-
if (sdata->vif.type == NL80211_IFTYPE_AP) {
local->fif_pspoll--;
local->fif_probe_req--;
@@ -1055,12 +1049,10 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
- int allmulti, promisc, sdata_allmulti, sdata_promisc;
+ int allmulti, sdata_allmulti;
allmulti = !!(dev->flags & IFF_ALLMULTI);
- promisc = !!(dev->flags & IFF_PROMISC);
sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
- sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
if (allmulti != sdata_allmulti) {
if (dev->flags & IFF_ALLMULTI)
@@ -1070,13 +1062,6 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
}
- if (promisc != sdata_promisc) {
- if (dev->flags & IFF_PROMISC)
- atomic_inc(&local->iff_promiscs);
- else
- atomic_dec(&local->iff_promiscs);
- sdata->flags ^= IEEE80211_SDATA_PROMISC;
- }
spin_lock_bh(&local->filter_lock);
__hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
spin_unlock_bh(&local->filter_lock);
@@ -1117,6 +1102,35 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
}
+static struct rtnl_link_stats64 *
+ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_sw_netstats *tstats;
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ unsigned int start;
+
+ tstats = per_cpu_ptr(dev->tstats, i);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&tstats->syncp);
+ rx_packets = tstats->rx_packets;
+ tx_packets = tstats->tx_packets;
+ rx_bytes = tstats->rx_bytes;
+ tx_bytes = tstats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->tx_packets += tx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_bytes += tx_bytes;
+ }
+
+ return stats;
+}
+
static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
@@ -1126,6 +1140,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = ieee80211_change_mac,
.ndo_select_queue = ieee80211_netdev_select_queue,
+ .ndo_get_stats64 = ieee80211_get_stats64,
};
static u16 ieee80211_monitor_select_queue(struct net_device *dev,
@@ -1159,14 +1174,21 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = ieee80211_change_mac,
.ndo_select_queue = ieee80211_monitor_select_queue,
+ .ndo_get_stats64 = ieee80211_get_stats64,
};
+static void ieee80211_if_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
static void ieee80211_if_setup(struct net_device *dev)
{
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->netdev_ops = &ieee80211_dataif_ops;
- dev->destructor = free_netdev;
+ dev->destructor = ieee80211_if_free;
}
static void ieee80211_iface_work(struct work_struct *work)
@@ -1564,7 +1586,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
- if (local->hw.flags & IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF) {
+ if (ieee80211_hw_check(&local->hw, P2P_DEV_ADDR_FOR_INTF)) {
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE)
continue;
@@ -1707,6 +1729,12 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
return -ENOMEM;
dev_net_set(ndev, wiphy_net(local->hw.wiphy));
+ ndev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!ndev->tstats) {
+ free_netdev(ndev);
+ return -ENOMEM;
+ }
+
ndev->needed_headroom = local->tx_headroom +
4*6 /* four MAC addresses */
+ 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
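The new ieee80211_get_stats64() is the standard per-CPU counter pattern: each CPU updates only its own pcpu_sw_netstats slot (allocated with netdev_alloc_pcpu_stats() in ieee80211_if_add() above), and the reader folds all slots together, retrying any snapshot the u64_stats seqcount saw change mid-read so 64-bit counters cannot tear on 32-bit hosts. A minimal userspace analogue of the split-counter half (pthreads; atomics stand in for per-CPU exclusivity, and the seqcount retry is elided):

  /* percpu_demo.c - per-thread counters, folded on read
   * build: cc percpu_demo.c -lpthread */
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  #define NWORKERS 4
  #define NUPDATES 100000

  static _Atomic unsigned long long rx_bytes[NWORKERS];

  static void *worker(void *arg)
  {
      int id = *(int *)arg;

      for (int i = 0; i < NUPDATES; i++)  /* uncontended: own slot only */
          atomic_fetch_add_explicit(&rx_bytes[id], 1,
                                    memory_order_relaxed);
      return NULL;
  }

  int main(void)
  {
      pthread_t tid[NWORKERS];
      int ids[NWORKERS];
      unsigned long long total = 0;

      for (int i = 0; i < NWORKERS; i++) {
          ids[i] = i;
          pthread_create(&tid[i], NULL, worker, &ids[i]);
      }
      for (int i = 0; i < NWORKERS; i++)
          pthread_join(tid[i], NULL);

      for (int i = 0; i < NWORKERS; i++)  /* the ndo_get_stats64() step */
          total += rx_bytes[i];
      printf("total: %llu (expect %d)\n", total, NWORKERS * NUPDATES);
      return 0;
  }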
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index a907f2d5c12d..8abc31ebcf61 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -147,7 +147,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
* is supported; if not, return.
*/
if (sta && !(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE) &&
- !(key->local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK))
+ !ieee80211_hw_check(&key->local->hw, SUPPORTS_PER_STA_GTK))
goto out_unsupported;
if (sta && !sta->uploaded)
@@ -201,7 +201,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
/* all of these we can do in software - if driver can */
if (ret == 1)
return 0;
- if (key->local->hw.flags & IEEE80211_HW_SW_CRYPTO_CONTROL)
+ if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
return -EINVAL;
return 0;
default:
@@ -256,6 +256,7 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
if (uni) {
rcu_assign_pointer(sdata->default_unicast_key, key);
+ ieee80211_check_fast_xmit_iface(sdata);
drv_set_default_unicast_key(sdata->local, sdata, idx);
}
@@ -325,6 +326,7 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
if (pairwise) {
rcu_assign_pointer(sta->ptk[idx], new);
sta->ptk_idx = idx;
+ ieee80211_check_fast_xmit(sta);
} else {
rcu_assign_pointer(sta->gtk[idx], new);
sta->gtk_idx = idx;
@@ -510,15 +512,17 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
break;
default:
if (cs) {
- size_t len = (seq_len > MAX_PN_LEN) ?
- MAX_PN_LEN : seq_len;
+ if (seq_len && seq_len != cs->pn_len) {
+ kfree(key);
+ return ERR_PTR(-EINVAL);
+ }
key->conf.iv_len = cs->hdr_len;
key->conf.icv_len = cs->mic_len;
for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
- for (j = 0; j < len; j++)
+ for (j = 0; j < seq_len; j++)
key->u.gen.rx_pn[i][j] =
- seq[len - j - 1];
+ seq[seq_len - j - 1];
key->flags |= KEY_FLAG_CIPHER_SCHEME;
}
}
@@ -892,27 +896,19 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
- pn64 = atomic64_read(&key->u.ccmp.tx_pn);
- seq->ccmp.pn[5] = pn64;
- seq->ccmp.pn[4] = pn64 >> 8;
- seq->ccmp.pn[3] = pn64 >> 16;
- seq->ccmp.pn[2] = pn64 >> 24;
- seq->ccmp.pn[1] = pn64 >> 32;
- seq->ccmp.pn[0] = pn64 >> 40;
- break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
- seq->ccmp.pn[5] = pn64;
- seq->ccmp.pn[4] = pn64 >> 8;
- seq->ccmp.pn[3] = pn64 >> 16;
- seq->ccmp.pn[2] = pn64 >> 24;
- seq->ccmp.pn[1] = pn64 >> 32;
- seq->ccmp.pn[0] = pn64 >> 40;
- break;
+ BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+ offsetof(typeof(*seq), aes_cmac));
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
+ BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+ offsetof(typeof(*seq), aes_gmac));
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+ offsetof(typeof(*seq), gcmp));
+ pn64 = atomic64_read(&key->conf.tx_pn);
seq->ccmp.pn[5] = pn64;
seq->ccmp.pn[4] = pn64 >> 8;
seq->ccmp.pn[3] = pn64 >> 16;
@@ -920,16 +916,6 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
seq->ccmp.pn[1] = pn64 >> 32;
seq->ccmp.pn[0] = pn64 >> 40;
break;
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- pn64 = atomic64_read(&key->u.gcmp.tx_pn);
- seq->gcmp.pn[5] = pn64;
- seq->gcmp.pn[4] = pn64 >> 8;
- seq->gcmp.pn[3] = pn64 >> 16;
- seq->gcmp.pn[2] = pn64 >> 24;
- seq->gcmp.pn[1] = pn64 >> 32;
- seq->gcmp.pn[0] = pn64 >> 40;
- break;
default:
WARN_ON(1);
}
@@ -1004,43 +990,25 @@ void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
- pn64 = (u64)seq->ccmp.pn[5] |
- ((u64)seq->ccmp.pn[4] << 8) |
- ((u64)seq->ccmp.pn[3] << 16) |
- ((u64)seq->ccmp.pn[2] << 24) |
- ((u64)seq->ccmp.pn[1] << 32) |
- ((u64)seq->ccmp.pn[0] << 40);
- atomic64_set(&key->u.ccmp.tx_pn, pn64);
- break;
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- pn64 = (u64)seq->aes_cmac.pn[5] |
- ((u64)seq->aes_cmac.pn[4] << 8) |
- ((u64)seq->aes_cmac.pn[3] << 16) |
- ((u64)seq->aes_cmac.pn[2] << 24) |
- ((u64)seq->aes_cmac.pn[1] << 32) |
- ((u64)seq->aes_cmac.pn[0] << 40);
- atomic64_set(&key->u.aes_cmac.tx_pn, pn64);
- break;
+ BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+ offsetof(typeof(*seq), aes_cmac));
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- pn64 = (u64)seq->aes_gmac.pn[5] |
- ((u64)seq->aes_gmac.pn[4] << 8) |
- ((u64)seq->aes_gmac.pn[3] << 16) |
- ((u64)seq->aes_gmac.pn[2] << 24) |
- ((u64)seq->aes_gmac.pn[1] << 32) |
- ((u64)seq->aes_gmac.pn[0] << 40);
- atomic64_set(&key->u.aes_gmac.tx_pn, pn64);
- break;
+ BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+ offsetof(typeof(*seq), aes_gmac));
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- pn64 = (u64)seq->gcmp.pn[5] |
- ((u64)seq->gcmp.pn[4] << 8) |
- ((u64)seq->gcmp.pn[3] << 16) |
- ((u64)seq->gcmp.pn[2] << 24) |
- ((u64)seq->gcmp.pn[1] << 32) |
- ((u64)seq->gcmp.pn[0] << 40);
- atomic64_set(&key->u.gcmp.tx_pn, pn64);
+ BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) !=
+ offsetof(typeof(*seq), gcmp));
+ pn64 = (u64)seq->ccmp.pn[5] |
+ ((u64)seq->ccmp.pn[4] << 8) |
+ ((u64)seq->ccmp.pn[3] << 16) |
+ ((u64)seq->ccmp.pn[2] << 24) |
+ ((u64)seq->ccmp.pn[1] << 32) |
+ ((u64)seq->ccmp.pn[0] << 40);
+ atomic64_set(&key->conf.tx_pn, pn64);
break;
default:
WARN_ON(1);
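The tx_pn consolidation works because every PN-based cipher here (CCMP, AES-CMAC, AES-GMAC, GCMP) carries the same 48-bit big-endian packet number, so one counter in key->conf.tx_pn and one byte-order conversion suffice; the BUILD_BUG_ON(offsetof(...)) lines prove the union members alias before the cases fall through. A round-trip sketch of the two conversions the hunks keep (the byte layout matches the code above; the helper names are illustrative):

  #include <stdint.h>
  #include <stdio.h>

  /* u64 counter -> 6 big-endian bytes, as in ieee80211_get_key_tx_seq() */
  static void pn_to_bytes(uint64_t pn, uint8_t out[6])
  {
      out[5] = pn;
      out[4] = pn >> 8;
      out[3] = pn >> 16;
      out[2] = pn >> 24;
      out[1] = pn >> 32;
      out[0] = pn >> 40;
  }

  /* 6 big-endian bytes -> u64 counter, as in ieee80211_set_key_tx_seq() */
  static uint64_t bytes_to_pn(const uint8_t in[6])
  {
      return (uint64_t)in[5] |
             ((uint64_t)in[4] << 8) |
             ((uint64_t)in[3] << 16) |
             ((uint64_t)in[2] << 24) |
             ((uint64_t)in[1] << 32) |
             ((uint64_t)in[0] << 40);
  }

  int main(void)
  {
      uint8_t pn[6];
      uint64_t v = 0x0000123456789abcULL;  /* any 48-bit value */

      pn_to_bytes(v, pn);
      printf("round trip ok: %d\n", bytes_to_pn(pn) == v);
      return 0;
  }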
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 96557dd1e77d..3f4f9eaac140 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -18,7 +18,6 @@
#define NUM_DEFAULT_KEYS 4
#define NUM_DEFAULT_MGMT_KEYS 2
-#define MAX_PN_LEN 16
struct ieee80211_local;
struct ieee80211_sub_if_data;
@@ -78,7 +77,6 @@ struct ieee80211_key {
u32 mic_failures;
} tkip;
struct {
- atomic64_t tx_pn;
/*
* Last received packet number. The first
* IEEE80211_NUM_TIDS counters are used with Data
@@ -90,21 +88,18 @@ struct ieee80211_key {
u32 replays; /* dot11RSNAStatsCCMPReplays */
} ccmp;
struct {
- atomic64_t tx_pn;
u8 rx_pn[IEEE80211_CMAC_PN_LEN];
struct crypto_cipher *tfm;
u32 replays; /* dot11RSNAStatsCMACReplays */
u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
} aes_cmac;
struct {
- atomic64_t tx_pn;
u8 rx_pn[IEEE80211_GMAC_PN_LEN];
struct crypto_aead *tfm;
u32 replays; /* dot11RSNAStatsCMACReplays */
u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
} aes_gmac;
struct {
- atomic64_t tx_pn;
/* Last received packet number. The first
* IEEE80211_NUM_TIDS counters are used with Data
* frames and the last counter is used with Robust
@@ -116,7 +111,7 @@ struct ieee80211_key {
} gcmp;
struct {
/* generic cipher scheme */
- u8 rx_pn[IEEE80211_NUM_TIDS + 1][MAX_PN_LEN];
+ u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_MAX_PN_LEN];
} gen;
} u;
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index e2b836446af3..0505845b7ab8 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -12,96 +12,175 @@
#include <linux/export.h>
#include "led.h"
-#define MAC80211_BLINK_DELAY 50 /* ms */
-
-void ieee80211_led_rx(struct ieee80211_local *local)
-{
- unsigned long led_delay = MAC80211_BLINK_DELAY;
- if (unlikely(!local->rx_led))
- return;
- led_trigger_blink_oneshot(local->rx_led, &led_delay, &led_delay, 0);
-}
-
-void ieee80211_led_tx(struct ieee80211_local *local)
-{
- unsigned long led_delay = MAC80211_BLINK_DELAY;
- if (unlikely(!local->tx_led))
- return;
- led_trigger_blink_oneshot(local->tx_led, &led_delay, &led_delay, 0);
-}
-
void ieee80211_led_assoc(struct ieee80211_local *local, bool associated)
{
- if (unlikely(!local->assoc_led))
+ if (!atomic_read(&local->assoc_led_active))
return;
if (associated)
- led_trigger_event(local->assoc_led, LED_FULL);
+ led_trigger_event(&local->assoc_led, LED_FULL);
else
- led_trigger_event(local->assoc_led, LED_OFF);
+ led_trigger_event(&local->assoc_led, LED_OFF);
}
void ieee80211_led_radio(struct ieee80211_local *local, bool enabled)
{
- if (unlikely(!local->radio_led))
+ if (!atomic_read(&local->radio_led_active))
return;
if (enabled)
- led_trigger_event(local->radio_led, LED_FULL);
+ led_trigger_event(&local->radio_led, LED_FULL);
else
- led_trigger_event(local->radio_led, LED_OFF);
+ led_trigger_event(&local->radio_led, LED_OFF);
+}
+
+void ieee80211_alloc_led_names(struct ieee80211_local *local)
+{
+ local->rx_led.name = kasprintf(GFP_KERNEL, "%srx",
+ wiphy_name(local->hw.wiphy));
+ local->tx_led.name = kasprintf(GFP_KERNEL, "%stx",
+ wiphy_name(local->hw.wiphy));
+ local->assoc_led.name = kasprintf(GFP_KERNEL, "%sassoc",
+ wiphy_name(local->hw.wiphy));
+ local->radio_led.name = kasprintf(GFP_KERNEL, "%sradio",
+ wiphy_name(local->hw.wiphy));
+}
+
+void ieee80211_free_led_names(struct ieee80211_local *local)
+{
+ kfree(local->rx_led.name);
+ kfree(local->tx_led.name);
+ kfree(local->assoc_led.name);
+ kfree(local->radio_led.name);
+}
+
+static void ieee80211_tx_led_activate(struct led_classdev *led_cdev)
+{
+ struct ieee80211_local *local = container_of(led_cdev->trigger,
+ struct ieee80211_local,
+ tx_led);
+
+ atomic_inc(&local->tx_led_active);
+}
+
+static void ieee80211_tx_led_deactivate(struct led_classdev *led_cdev)
+{
+ struct ieee80211_local *local = container_of(led_cdev->trigger,
+ struct ieee80211_local,
+ tx_led);
+
+ atomic_dec(&local->tx_led_active);
+}
+
+static void ieee80211_rx_led_activate(struct led_classdev *led_cdev)
+{
+ struct ieee80211_local *local = container_of(led_cdev->trigger,
+ struct ieee80211_local,
+ rx_led);
+
+ atomic_inc(&local->rx_led_active);
+}
+
+static void ieee80211_rx_led_deactivate(struct led_classdev *led_cdev)
+{
+ struct ieee80211_local *local = container_of(led_cdev->trigger,
+ struct ieee80211_local,
+ rx_led);
+
+ atomic_dec(&local->rx_led_active);
+}
+
+static void ieee80211_assoc_led_activate(struct led_classdev *led_cdev)
+{
+ struct ieee80211_local *local = container_of(led_cdev->trigger,
+ struct ieee80211_local,
+ assoc_led);
+
+ atomic_inc(&local->assoc_led_active);
+}
+
+static void ieee80211_assoc_led_deactivate(struct led_classdev *led_cdev)
+{
+ struct ieee80211_local *local = container_of(led_cdev->trigger,
+ struct ieee80211_local,
+ assoc_led);
+
+ atomic_dec(&local->assoc_led_active);
+}
+
+static void ieee80211_radio_led_activate(struct led_classdev *led_cdev)
+{
+ struct ieee80211_local *local = container_of(led_cdev->trigger,
+ struct ieee80211_local,
+ radio_led);
+
+ atomic_inc(&local->radio_led_active);
+}
+
+static void ieee80211_radio_led_deactivate(struct led_classdev *led_cdev)
+{
+ struct ieee80211_local *local = container_of(led_cdev->trigger,
+ struct ieee80211_local,
+ radio_led);
+
+ atomic_dec(&local->radio_led_active);
+}
+
+static void ieee80211_tpt_led_activate(struct led_classdev *led_cdev)
+{
+ struct ieee80211_local *local = container_of(led_cdev->trigger,
+ struct ieee80211_local,
+ tpt_led);
+
+ atomic_inc(&local->tpt_led_active);
}
-void ieee80211_led_names(struct ieee80211_local *local)
+static void ieee80211_tpt_led_deactivate(struct led_classdev *led_cdev)
{
- snprintf(local->rx_led_name, sizeof(local->rx_led_name),
- "%srx", wiphy_name(local->hw.wiphy));
- snprintf(local->tx_led_name, sizeof(local->tx_led_name),
- "%stx", wiphy_name(local->hw.wiphy));
- snprintf(local->assoc_led_name, sizeof(local->assoc_led_name),
- "%sassoc", wiphy_name(local->hw.wiphy));
- snprintf(local->radio_led_name, sizeof(local->radio_led_name),
- "%sradio", wiphy_name(local->hw.wiphy));
+ struct ieee80211_local *local = container_of(led_cdev->trigger,
+ struct ieee80211_local,
+ tpt_led);
+
+ atomic_dec(&local->tpt_led_active);
}
void ieee80211_led_init(struct ieee80211_local *local)
{
- local->rx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
- if (local->rx_led) {
- local->rx_led->name = local->rx_led_name;
- if (led_trigger_register(local->rx_led)) {
- kfree(local->rx_led);
- local->rx_led = NULL;
- }
+ atomic_set(&local->rx_led_active, 0);
+ local->rx_led.activate = ieee80211_rx_led_activate;
+ local->rx_led.deactivate = ieee80211_rx_led_deactivate;
+ if (local->rx_led.name && led_trigger_register(&local->rx_led)) {
+ kfree(local->rx_led.name);
+ local->rx_led.name = NULL;
}
- local->tx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
- if (local->tx_led) {
- local->tx_led->name = local->tx_led_name;
- if (led_trigger_register(local->tx_led)) {
- kfree(local->tx_led);
- local->tx_led = NULL;
- }
+ atomic_set(&local->tx_led_active, 0);
+ local->tx_led.activate = ieee80211_tx_led_activate;
+ local->tx_led.deactivate = ieee80211_tx_led_deactivate;
+ if (local->tx_led.name && led_trigger_register(&local->tx_led)) {
+ kfree(local->tx_led.name);
+ local->tx_led.name = NULL;
}
- local->assoc_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
- if (local->assoc_led) {
- local->assoc_led->name = local->assoc_led_name;
- if (led_trigger_register(local->assoc_led)) {
- kfree(local->assoc_led);
- local->assoc_led = NULL;
- }
+ atomic_set(&local->assoc_led_active, 0);
+ local->assoc_led.activate = ieee80211_assoc_led_activate;
+ local->assoc_led.deactivate = ieee80211_assoc_led_deactivate;
+ if (local->assoc_led.name && led_trigger_register(&local->assoc_led)) {
+ kfree(local->assoc_led.name);
+ local->assoc_led.name = NULL;
}
- local->radio_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
- if (local->radio_led) {
- local->radio_led->name = local->radio_led_name;
- if (led_trigger_register(local->radio_led)) {
- kfree(local->radio_led);
- local->radio_led = NULL;
- }
+ atomic_set(&local->radio_led_active, 0);
+ local->radio_led.activate = ieee80211_radio_led_activate;
+ local->radio_led.deactivate = ieee80211_radio_led_deactivate;
+ if (local->radio_led.name && led_trigger_register(&local->radio_led)) {
+ kfree(local->radio_led.name);
+ local->radio_led.name = NULL;
}
+ atomic_set(&local->tpt_led_active, 0);
if (local->tpt_led_trigger) {
- if (led_trigger_register(&local->tpt_led_trigger->trig)) {
+ local->tpt_led.activate = ieee80211_tpt_led_activate;
+ local->tpt_led.deactivate = ieee80211_tpt_led_deactivate;
+ if (led_trigger_register(&local->tpt_led)) {
kfree(local->tpt_led_trigger);
local->tpt_led_trigger = NULL;
}
@@ -110,58 +189,50 @@ void ieee80211_led_init(struct ieee80211_local *local)
void ieee80211_led_exit(struct ieee80211_local *local)
{
- if (local->radio_led) {
- led_trigger_unregister(local->radio_led);
- kfree(local->radio_led);
- }
- if (local->assoc_led) {
- led_trigger_unregister(local->assoc_led);
- kfree(local->assoc_led);
- }
- if (local->tx_led) {
- led_trigger_unregister(local->tx_led);
- kfree(local->tx_led);
- }
- if (local->rx_led) {
- led_trigger_unregister(local->rx_led);
- kfree(local->rx_led);
- }
+ if (local->radio_led.name)
+ led_trigger_unregister(&local->radio_led);
+ if (local->assoc_led.name)
+ led_trigger_unregister(&local->assoc_led);
+ if (local->tx_led.name)
+ led_trigger_unregister(&local->tx_led);
+ if (local->rx_led.name)
+ led_trigger_unregister(&local->rx_led);
if (local->tpt_led_trigger) {
- led_trigger_unregister(&local->tpt_led_trigger->trig);
+ led_trigger_unregister(&local->tpt_led);
kfree(local->tpt_led_trigger);
}
}
-char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
- return local->radio_led_name;
+ return local->radio_led.name;
}
EXPORT_SYMBOL(__ieee80211_get_radio_led_name);
-char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
- return local->assoc_led_name;
+ return local->assoc_led.name;
}
EXPORT_SYMBOL(__ieee80211_get_assoc_led_name);
-char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
- return local->tx_led_name;
+ return local->tx_led.name;
}
EXPORT_SYMBOL(__ieee80211_get_tx_led_name);
-char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
- return local->rx_led_name;
+ return local->rx_led.name;
}
EXPORT_SYMBOL(__ieee80211_get_rx_led_name);
@@ -205,16 +276,17 @@ static void tpt_trig_timer(unsigned long data)
}
}
- read_lock(&tpt_trig->trig.leddev_list_lock);
- list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list)
+ read_lock(&local->tpt_led.leddev_list_lock);
+ list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list)
led_blink_set(led_cdev, &on, &off);
- read_unlock(&tpt_trig->trig.leddev_list_lock);
+ read_unlock(&local->tpt_led.leddev_list_lock);
}
-char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
- unsigned int flags,
- const struct ieee80211_tpt_blink *blink_table,
- unsigned int blink_table_len)
+const char *
+__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+ unsigned int flags,
+ const struct ieee80211_tpt_blink *blink_table,
+ unsigned int blink_table_len)
{
struct ieee80211_local *local = hw_to_local(hw);
struct tpt_led_trigger *tpt_trig;
@@ -229,7 +301,7 @@ char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
snprintf(tpt_trig->name, sizeof(tpt_trig->name),
"%stpt", wiphy_name(local->hw.wiphy));
- tpt_trig->trig.name = tpt_trig->name;
+ local->tpt_led.name = tpt_trig->name;
tpt_trig->blink_table = blink_table;
tpt_trig->blink_table_len = blink_table_len;
@@ -269,10 +341,10 @@ static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local)
tpt_trig->running = false;
del_timer_sync(&tpt_trig->timer);
- read_lock(&tpt_trig->trig.leddev_list_lock);
- list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list)
+ read_lock(&local->tpt_led.leddev_list_lock);
+ list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list)
led_set_brightness(led_cdev, LED_OFF);
- read_unlock(&tpt_trig->trig.leddev_list_lock);
+ read_unlock(&local->tpt_led.leddev_list_lock);
}
void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
diff --git a/net/mac80211/led.h b/net/mac80211/led.h
index 89f4344f13b9..a7893a1ac98b 100644
--- a/net/mac80211/led.h
+++ b/net/mac80211/led.h
@@ -11,25 +11,42 @@
#include <linux/leds.h>
#include "ieee80211_i.h"
+#define MAC80211_BLINK_DELAY 50 /* ms */
+
+static inline void ieee80211_led_rx(struct ieee80211_local *local)
+{
+#ifdef CONFIG_MAC80211_LEDS
+ unsigned long led_delay = MAC80211_BLINK_DELAY;
+
+ if (!atomic_read(&local->rx_led_active))
+ return;
+ led_trigger_blink_oneshot(&local->rx_led, &led_delay, &led_delay, 0);
+#endif
+}
+
+static inline void ieee80211_led_tx(struct ieee80211_local *local)
+{
+#ifdef CONFIG_MAC80211_LEDS
+ unsigned long led_delay = MAC80211_BLINK_DELAY;
+
+ if (!atomic_read(&local->tx_led_active))
+ return;
+ led_trigger_blink_oneshot(&local->tx_led, &led_delay, &led_delay, 0);
+#endif
+}
+
#ifdef CONFIG_MAC80211_LEDS
-void ieee80211_led_rx(struct ieee80211_local *local);
-void ieee80211_led_tx(struct ieee80211_local *local);
void ieee80211_led_assoc(struct ieee80211_local *local,
bool associated);
void ieee80211_led_radio(struct ieee80211_local *local,
bool enabled);
-void ieee80211_led_names(struct ieee80211_local *local);
+void ieee80211_alloc_led_names(struct ieee80211_local *local);
+void ieee80211_free_led_names(struct ieee80211_local *local);
void ieee80211_led_init(struct ieee80211_local *local);
void ieee80211_led_exit(struct ieee80211_local *local);
void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
unsigned int types_on, unsigned int types_off);
#else
-static inline void ieee80211_led_rx(struct ieee80211_local *local)
-{
-}
-static inline void ieee80211_led_tx(struct ieee80211_local *local)
-{
-}
static inline void ieee80211_led_assoc(struct ieee80211_local *local,
bool associated)
{
@@ -38,7 +55,10 @@ static inline void ieee80211_led_radio(struct ieee80211_local *local,
bool enabled)
{
}
-static inline void ieee80211_led_names(struct ieee80211_local *local)
+static inline void ieee80211_alloc_led_names(struct ieee80211_local *local)
+{
+}
+static inline void ieee80211_free_led_names(struct ieee80211_local *local)
{
}
static inline void ieee80211_led_init(struct ieee80211_local *local)
@@ -58,7 +78,7 @@ static inline void
ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, __le16 fc, int bytes)
{
#ifdef CONFIG_MAC80211_LEDS
- if (local->tpt_led_trigger && ieee80211_is_data(fc))
+ if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active))
local->tpt_led_trigger->tx_bytes += bytes;
#endif
}
@@ -67,7 +87,7 @@ static inline void
ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, __le16 fc, int bytes)
{
#ifdef CONFIG_MAC80211_LEDS
- if (local->tpt_led_trigger && ieee80211_is_data(fc))
+ if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active))
local->tpt_led_trigger->rx_bytes += bytes;
#endif
}
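
The led.c/led.h rework above replaces four separately kzalloc'd led_trigger objects with triggers embedded in struct ieee80211_local, plus atomic counters maintained by activate/deactivate callbacks, so the TX/RX blink helpers can bail out with a single atomic read when no LED is actually attached to the trigger. The same pattern, reduced to a standalone sketch with hypothetical names:

    #include <linux/leds.h>
    #include <linux/atomic.h>

    struct my_dev {
            struct led_trigger tx_led;      /* embedded, not allocated */
            atomic_t tx_led_active;         /* LEDs currently attached */
    };

    static void my_tx_led_activate(struct led_classdev *led_cdev)
    {
            /* recover the containing device from the embedded trigger */
            struct my_dev *dev = container_of(led_cdev->trigger,
                                              struct my_dev, tx_led);

            atomic_inc(&dev->tx_led_active);
    }

    static void my_tx_led_deactivate(struct led_classdev *led_cdev)
    {
            struct my_dev *dev = container_of(led_cdev->trigger,
                                              struct my_dev, tx_led);

            atomic_dec(&dev->tx_led_active);
    }

    static inline void my_dev_tx_blink(struct my_dev *dev)
    {
            unsigned long delay = 50;       /* ms */

            /* cheap check on the hot path; no work if nothing listens */
            if (!atomic_read(&dev->tx_led_active))
                    return;
            led_trigger_blink_oneshot(&dev->tx_led, &delay, &delay, 0);
    }
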
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index df3051d96aff..3c63468b4dfb 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -41,9 +41,6 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
unsigned int changed_flags;
unsigned int new_flags = 0;
- if (atomic_read(&local->iff_promiscs))
- new_flags |= FIF_PROMISC_IN_BSS;
-
if (atomic_read(&local->iff_allmultis))
new_flags |= FIF_ALLMULTI;
@@ -249,6 +246,7 @@ static void ieee80211_restart_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, restart_work);
+ struct ieee80211_sub_if_data *sdata;
/* wait for scan work complete */
flush_workqueue(local->workqueue);
@@ -257,6 +255,8 @@ static void ieee80211_restart_work(struct work_struct *work)
"%s called with hardware scan in progress\n", __func__);
rtnl_lock();
+ list_for_each_entry(sdata, &local->interfaces, list)
+ flush_delayed_work(&sdata->dec_tailroom_needed_wk);
ieee80211_scan_cancel(local);
ieee80211_reconfig(local);
rtnl_unlock();
@@ -646,7 +646,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
skb_queue_head_init(&local->skb_queue);
skb_queue_head_init(&local->skb_queue_unreliable);
- ieee80211_led_names(local);
+ ieee80211_alloc_led_names(local);
ieee80211_roc_setup(local);
@@ -661,7 +661,7 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
{
bool have_wep = !(IS_ERR(local->wep_tx_tfm) ||
IS_ERR(local->wep_rx_tfm));
- bool have_mfp = local->hw.flags & IEEE80211_HW_MFP_CAPABLE;
+ bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE);
int n_suites = 0, r = 0, w = 0;
u32 *suites;
static const u32 cipher_suites[] = {
@@ -681,7 +681,7 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
WLAN_CIPHER_SUITE_BIP_GMAC_256,
};
- if (local->hw.flags & IEEE80211_HW_SW_CRYPTO_CONTROL ||
+ if (ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) ||
local->hw.wiphy->cipher_suites) {
/* If the driver advertises, or doesn't support SW crypto,
* we only need to remove WEP if necessary.
@@ -771,8 +771,13 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_256;
}
- for (r = 0; r < local->hw.n_cipher_schemes; r++)
+ for (r = 0; r < local->hw.n_cipher_schemes; r++) {
suites[w++] = cs[r].cipher;
+ if (WARN_ON(cs[r].pn_len > IEEE80211_MAX_PN_LEN)) {
+ kfree(suites);
+ return -EINVAL;
+ }
+ }
}
local->hw.wiphy->cipher_suites = suites;
@@ -792,7 +797,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
netdev_features_t feature_whitelist;
struct cfg80211_chan_def dflt_chandef = {};
- if (hw->flags & IEEE80211_HW_QUEUE_CONTROL &&
+ if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
(local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE ||
local->hw.offchannel_tx_hw_queue >= local->hw.queues))
return -EINVAL;
@@ -840,7 +845,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
/* Only HW csum features are currently compatible with mac80211 */
feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_CSUM;
+ NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_GSO_SOFTWARE;
if (WARN_ON(hw->netdev_features & ~feature_whitelist))
return -EINVAL;
@@ -939,9 +945,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
/* mac80211 supports control port protocol changing */
local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
- if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
+ if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) {
local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- } else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) {
+ } else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) {
local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
if (hw->max_signal <= 0) {
result = -EINVAL;
@@ -995,7 +1001,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
/* mac80211 supports eCSA, if the driver supports STA CSA at all */
- if (local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)
+ if (ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA))
local->ext_capa[0] |= WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING;
local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM;
@@ -1063,7 +1069,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
/* add one default STA interface if supported */
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
- !(hw->flags & IEEE80211_HW_NO_AUTO_VIF)) {
+ !ieee80211_hw_check(hw, NO_AUTO_VIF)) {
result = ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, NULL,
NL80211_IFTYPE_STATION, NULL);
if (result)
@@ -1209,6 +1215,8 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
sta_info_stop(local);
+ ieee80211_free_led_names(local);
+
wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
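
Throughout main.c (and the rest of the patch) open-coded `local->hw.flags & IEEE80211_HW_FOO` tests become `ieee80211_hw_check(&local->hw, FOO)`. The accessor lets the flag storage change representation without touching callers; in mac80211 the flags did move to a bitmap around this time, roughly along these lines (a sketch of the idea, not quoted from this diff):

    /* flags as a bitmap: the flag space can grow past 32 entries */
    enum ieee80211_hw_flags {
            IEEE80211_HW_HAS_RATE_CONTROL,
            IEEE80211_HW_RX_INCLUDES_FCS,
            /* ... */
            NUM_IEEE80211_HW_FLAGS
    };

    struct ieee80211_hw_sketch {
            unsigned long flags[BITS_TO_LONGS(NUM_IEEE80211_HW_FLAGS)];
    };

    static inline bool
    _hw_check(struct ieee80211_hw_sketch *hw, enum ieee80211_hw_flags flg)
    {
            return test_bit(flg, hw->flags);
    }
    #define hw_check(hw, flg) _hw_check(hw, IEEE80211_HW_##flg)

The token-pasting macro is what lets callers write the short flag names (QUEUE_CONTROL, SIGNAL_DBM, ...) seen in the hunks above.
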
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d4684242e78b..817098add1d6 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -680,6 +680,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
*pos++ = 0x0;
*pos++ = ieee80211_frequency_to_channel(
csa->settings.chandef.chan->center_freq);
+ bcn->csa_current_counter = csa->settings.count;
bcn->csa_counter_offsets[0] = hdr_len + 6;
*pos++ = csa->settings.count;
*pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 214e63b84e5c..085edc1d056b 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -510,14 +510,14 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
- const u8 *preq_elem, u32 metric)
+ const u8 *preq_elem, u32 orig_metric)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_path *mpath = NULL;
const u8 *target_addr, *orig_addr;
const u8 *da;
u8 target_flags, ttl, flags;
- u32 orig_sn, target_sn, lifetime, orig_metric;
+ u32 orig_sn, target_sn, lifetime, target_metric;
bool reply = false;
bool forward = true;
bool root_is_gate;
@@ -528,7 +528,6 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
target_sn = PREQ_IE_TARGET_SN(preq_elem);
orig_sn = PREQ_IE_ORIG_SN(preq_elem);
target_flags = PREQ_IE_TARGET_F(preq_elem);
- orig_metric = metric;
/* Proactive PREQ gate announcements */
flags = PREQ_IE_FLAGS(preq_elem);
root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
@@ -539,7 +538,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
mhwmp_dbg(sdata, "PREQ is for us\n");
forward = false;
reply = true;
- metric = 0;
+ target_metric = 0;
if (time_after(jiffies, ifmsh->last_sn_update +
net_traversal_jiffies(sdata)) ||
time_before(jiffies, ifmsh->last_sn_update)) {
@@ -556,7 +555,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
reply = true;
target_addr = sdata->vif.addr;
target_sn = ++ifmsh->sn;
- metric = 0;
+ target_metric = 0;
ifmsh->last_sn_update = jiffies;
}
if (root_is_gate)
@@ -574,7 +573,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
} else if ((!(target_flags & MP_F_DO)) &&
(mpath->flags & MESH_PATH_ACTIVE)) {
reply = true;
- metric = mpath->metric;
+ target_metric = mpath->metric;
target_sn = mpath->sn;
if (target_flags & MP_F_RF)
target_flags |= MP_F_DO;
@@ -593,7 +592,8 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
orig_sn, 0, target_addr,
target_sn, mgmt->sa, 0, ttl,
- lifetime, metric, 0, sdata);
+ lifetime, target_metric, 0,
+ sdata);
} else {
ifmsh->mshstats.dropped_frames_ttl++;
}
@@ -619,13 +619,12 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
target_sn = PREQ_IE_TARGET_SN(preq_elem);
- metric = orig_metric;
}
mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
orig_sn, target_flags, target_addr,
target_sn, da, hopcount, ttl, lifetime,
- metric, preq_id, sdata);
+ orig_metric, preq_id, sdata);
if (!is_multicast_ether_addr(da))
ifmsh->mshstats.fwded_unicast++;
else
@@ -854,7 +853,7 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
{
struct ieee802_11_elems elems;
size_t baselen;
- u32 last_hop_metric;
+ u32 path_metric;
struct sta_info *sta;
/* need action_code */
@@ -877,21 +876,21 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
if (elems.preq_len != 37)
/* Right now we support just 1 destination and no AE */
return;
- last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
- MPATH_PREQ);
- if (last_hop_metric)
+ path_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
+ MPATH_PREQ);
+ if (path_metric)
hwmp_preq_frame_process(sdata, mgmt, elems.preq,
- last_hop_metric);
+ path_metric);
}
if (elems.prep) {
if (elems.prep_len != 31)
/* Right now we support no AE */
return;
- last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
- MPATH_PREP);
- if (last_hop_metric)
+ path_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
+ MPATH_PREP);
+ if (path_metric)
hwmp_prep_frame_process(sdata, mgmt, elems.prep,
- last_hop_metric);
+ path_metric);
}
if (elems.perr) {
if (elems.perr_len != 15)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 60d737f144e3..5438d13e2f00 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -72,10 +72,11 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
*
* @sta: mesh peer link to restart
*
- * Locking: this function must be called holding sta->lock
+ * Locking: this function must be called holding sta->plink_lock
*/
static inline void mesh_plink_fsm_restart(struct sta_info *sta)
{
+ lockdep_assert_held(&sta->plink_lock);
sta->plink_state = NL80211_PLINK_LISTEN;
sta->llid = sta->plid = sta->reason = 0;
sta->plink_retries = 0;
@@ -105,9 +106,7 @@ static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata)
/* (IEEE 802.11-2012 19.4.5) */
short_slot = true;
goto out;
- } else if (band != IEEE80211_BAND_2GHZ ||
- (band == IEEE80211_BAND_2GHZ &&
- local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
+ } else if (band != IEEE80211_BAND_2GHZ)
goto out;
for (i = 0; i < sband->n_bitrates; i++)
@@ -213,13 +212,15 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
* All mesh paths with this peer as next hop will be flushed
* Returns beacon changed flag if the beacon content changed.
*
- * Locking: the caller must hold sta->lock
+ * Locking: the caller must hold sta->plink_lock
*/
static u32 __mesh_plink_deactivate(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
u32 changed = 0;
+ lockdep_assert_held(&sta->plink_lock);
+
if (sta->plink_state == NL80211_PLINK_ESTAB)
changed = mesh_plink_dec_estab_count(sdata);
sta->plink_state = NL80211_PLINK_BLOCKED;
@@ -244,13 +245,13 @@ u32 mesh_plink_deactivate(struct sta_info *sta)
struct ieee80211_sub_if_data *sdata = sta->sdata;
u32 changed;
- spin_lock_bh(&sta->lock);
+ spin_lock_bh(&sta->plink_lock);
changed = __mesh_plink_deactivate(sta);
sta->reason = WLAN_REASON_MESH_PEER_CANCELED;
mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
sta->sta.addr, sta->llid, sta->plid,
sta->reason);
- spin_unlock_bh(&sta->lock);
+ spin_unlock_bh(&sta->plink_lock);
return changed;
}
@@ -387,12 +388,13 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
sband = local->hw.wiphy->bands[band];
rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
- spin_lock_bh(&sta->lock);
+ spin_lock_bh(&sta->plink_lock);
sta->last_rx = jiffies;
/* rates and capabilities don't change during peering */
- if (sta->plink_state == NL80211_PLINK_ESTAB)
+ if (sta->plink_state == NL80211_PLINK_ESTAB && sta->processed_beacon)
goto out;
+ sta->processed_beacon = true;
if (sta->sta.supp_rates[band] != rates)
changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
@@ -419,7 +421,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
else
rate_control_rate_update(local, sband, sta, changed);
out:
- spin_unlock_bh(&sta->lock);
+ spin_unlock_bh(&sta->plink_lock);
}
static struct sta_info *
@@ -552,7 +554,7 @@ static void mesh_plink_timer(unsigned long data)
if (sta->sdata->local->quiescing)
return;
- spin_lock_bh(&sta->lock);
+ spin_lock_bh(&sta->plink_lock);
/* If a timer fires just before a state transition on another CPU,
* we may have already extended the timeout and changed state by the
@@ -563,7 +565,7 @@ static void mesh_plink_timer(unsigned long data)
mpl_dbg(sta->sdata,
"Ignoring timer for %pM in state %s (timer adjusted)",
sta->sta.addr, mplstates[sta->plink_state]);
- spin_unlock_bh(&sta->lock);
+ spin_unlock_bh(&sta->plink_lock);
return;
}
@@ -573,7 +575,7 @@ static void mesh_plink_timer(unsigned long data)
mpl_dbg(sta->sdata,
"Ignoring timer for %pM in state %s (timer deleted)",
sta->sta.addr, mplstates[sta->plink_state]);
- spin_unlock_bh(&sta->lock);
+ spin_unlock_bh(&sta->plink_lock);
return;
}
@@ -619,7 +621,7 @@ static void mesh_plink_timer(unsigned long data)
default:
break;
}
- spin_unlock_bh(&sta->lock);
+ spin_unlock_bh(&sta->plink_lock);
if (action)
mesh_plink_frame_tx(sdata, action, sta->sta.addr,
sta->llid, sta->plid, reason);
@@ -674,16 +676,16 @@ u32 mesh_plink_open(struct sta_info *sta)
if (!test_sta_flag(sta, WLAN_STA_AUTH))
return 0;
- spin_lock_bh(&sta->lock);
+ spin_lock_bh(&sta->plink_lock);
sta->llid = mesh_get_new_llid(sdata);
if (sta->plink_state != NL80211_PLINK_LISTEN &&
sta->plink_state != NL80211_PLINK_BLOCKED) {
- spin_unlock_bh(&sta->lock);
+ spin_unlock_bh(&sta->plink_lock);
return 0;
}
sta->plink_state = NL80211_PLINK_OPN_SNT;
mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
- spin_unlock_bh(&sta->lock);
+ spin_unlock_bh(&sta->plink_lock);
mpl_dbg(sdata,
"Mesh plink: starting establishment with %pM\n",
sta->sta.addr);
@@ -700,10 +702,10 @@ u32 mesh_plink_block(struct sta_info *sta)
{
u32 changed;
- spin_lock_bh(&sta->lock);
+ spin_lock_bh(&sta->plink_lock);
changed = __mesh_plink_deactivate(sta);
sta->plink_state = NL80211_PLINK_BLOCKED;
- spin_unlock_bh(&sta->lock);
+ spin_unlock_bh(&sta->plink_lock);
return changed;
}
@@ -758,7 +760,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
mplstates[sta->plink_state], mplevents[event]);
- spin_lock_bh(&sta->lock);
+ spin_lock_bh(&sta->plink_lock);
switch (sta->plink_state) {
case NL80211_PLINK_LISTEN:
switch (event) {
@@ -872,7 +874,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
*/
break;
}
- spin_unlock_bh(&sta->lock);
+ spin_unlock_bh(&sta->plink_lock);
if (action) {
mesh_plink_frame_tx(sdata, action, sta->sta.addr,
sta->llid, sta->plid, sta->reason);
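
The mesh_plink.c changes move the peering FSM from the general sta->lock to a dedicated sta->plink_lock and back the "Locking:" comments with lockdep_assert_held(), so a caller that forgets the lock is flagged at runtime on lockdep-enabled kernels instead of silently racing. The idiom, in brief:

    /* Locking: the caller must hold sta->plink_lock */
    static void plink_fsm_restart(struct sta_info *sta)
    {
            lockdep_assert_held(&sta->plink_lock);
            /* ... reset FSM state ... */
    }
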
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 26053bf2faa8..9b2cc278ac2a 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -118,7 +118,7 @@ void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)
return;
- if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
return;
mod_timer(&sdata->u.mgd.bcn_mon_timer,
@@ -134,7 +134,7 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
ifmgd->probe_send_count = 0;
- if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
return;
mod_timer(&sdata->u.mgd.conn_mon_timer,
@@ -669,17 +669,15 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
capab = WLAN_CAPABILITY_ESS;
if (sband->band == IEEE80211_BAND_2GHZ) {
- if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
- capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
- if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
- capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+ capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
+ capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
}
if (assoc_data->capability & WLAN_CAPABILITY_PRIVACY)
capab |= WLAN_CAPABILITY_PRIVACY;
if ((assoc_data->capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
- (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
+ ieee80211_hw_check(&local->hw, SPECTRUM_MGMT))
capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
if (ifmgd->flags & IEEE80211_STA_ENABLE_RRM)
@@ -887,7 +885,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
drv_mgd_prepare_tx(local, sdata);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
ieee80211_tx_skb(sdata, skb);
@@ -929,7 +927,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
- if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
@@ -1098,6 +1096,24 @@ static void ieee80211_chswitch_timer(unsigned long data)
ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
}
+static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+{
+ struct sta_info *sta;
+ u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+ if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ continue;
+
+ ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
+ NL80211_TDLS_TEARDOWN, reason,
+ GFP_ATOMIC);
+ }
+ rcu_read_unlock();
+}
+
static void
ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
u64 timestamp, u32 device_timestamp,
@@ -1161,6 +1177,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
return;
}
+ /*
+ * Drop all TDLS peers - either we disconnect or move to a different
+ * channel from this point on. There's no telling what our peer will do.
+ * The TDLS WIDER_BW scenario is also problematic, as peers might now
+ * have an incompatible wider chandef.
+ */
+ ieee80211_teardown_tdls_peers(sdata);
+
mutex_lock(&local->mtx);
mutex_lock(&local->chanctx_mtx);
conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
@@ -1174,7 +1198,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
chanctx = container_of(conf, struct ieee80211_chanctx, conf);
if (local->use_chanctx &&
- !(local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)) {
+ !ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) {
sdata_info(sdata,
"driver doesn't support chan-switch with channel contexts\n");
goto drop_connection;
@@ -1383,15 +1407,15 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
return;
if (conf->dynamic_ps_timeout > 0 &&
- !(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)) {
+ !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) {
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(conf->dynamic_ps_timeout));
} else {
- if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
+ if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
ieee80211_send_nullfunc(local, sdata, 1);
- if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
- (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
+ if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
+ ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
return;
conf->flags |= IEEE80211_CONF_PS;
@@ -1450,7 +1474,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
int count = 0;
int timeout;
- if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) {
+ if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS)) {
local->ps_sdata = NULL;
return;
}
@@ -1596,7 +1620,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
- if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+ if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
if (drv_tx_frames_pending(local)) {
mod_timer(&local->dynamic_ps_timer, jiffies +
@@ -1609,8 +1633,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
}
}
- if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
- (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) ||
+ if (!(ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
+ ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) ||
(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
local->hw.conf.flags |= IEEE80211_CONF_PS;
@@ -2135,7 +2159,7 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
ieee80211_recalc_ps(local, -1);
mutex_unlock(&local->iflist_mtx);
- if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
goto out;
/*
@@ -2233,7 +2257,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
*/
ifmgd->probe_send_count++;
- if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+ if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
ifmgd->nullfunc_failed = false;
ieee80211_send_nullfunc(sdata->local, sdata, 0);
} else {
@@ -2495,6 +2519,34 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
sdata->u.mgd.auth_data = NULL;
}
+static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
+ bool assoc)
+{
+ struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
+
+ sdata_assert_lock(sdata);
+
+ if (!assoc) {
+ /*
+ * we are not associated yet, the only timer that could be
+ * running is the timeout for the association response which
+	 * is not relevant anymore.
+ */
+ del_timer_sync(&sdata->u.mgd.timer);
+ sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
+
+ eth_zero_addr(sdata->u.mgd.bssid);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
+ sdata->u.mgd.flags = 0;
+ mutex_lock(&sdata->local->mtx);
+ ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&sdata->local->mtx);
+ }
+
+ kfree(assoc_data);
+ sdata->u.mgd.assoc_data = NULL;
+}
+
static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
{
@@ -2510,7 +2562,7 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
return;
auth_data->expected_transaction = 4;
drv_mgd_prepare_tx(sdata->local, sdata);
- if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
ieee80211_send_auth(sdata, 3, auth_data->algorithm, 0,
@@ -2687,28 +2739,42 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- const u8 *bssid = NULL;
- u16 reason_code;
+ u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
sdata_assert_lock(sdata);
if (len < 24 + 2)
return;
- if (!ifmgd->associated ||
- !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
- return;
+ if (ifmgd->associated &&
+ ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) {
+ const u8 *bssid = ifmgd->associated->bssid;
- bssid = ifmgd->associated->bssid;
+ sdata_info(sdata, "deauthenticated from %pM (Reason: %u=%s)\n",
+ bssid, reason_code,
+ ieee80211_get_reason_code_string(reason_code));
- reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
+ ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
- sdata_info(sdata, "deauthenticated from %pM (Reason: %u=%s)\n",
- bssid, reason_code, ieee80211_get_reason_code_string(reason_code));
+ ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false,
+ reason_code);
+ return;
+ }
- ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
+ if (ifmgd->assoc_data &&
+ ether_addr_equal(mgmt->bssid, ifmgd->assoc_data->bss->bssid)) {
+ const u8 *bssid = ifmgd->assoc_data->bss->bssid;
- ieee80211_report_disconnect(sdata, (u8 *)mgmt, len, false, reason_code);
+ sdata_info(sdata,
+ "deauthenticated from %pM while associating (Reason: %u=%s)\n",
+ bssid, reason_code,
+ ieee80211_get_reason_code_string(reason_code));
+
+ ieee80211_destroy_assoc_data(sdata, false);
+
+ cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+ return;
+ }
}
@@ -2788,34 +2854,6 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
}
}
-static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
- bool assoc)
-{
- struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
-
- sdata_assert_lock(sdata);
-
- if (!assoc) {
- /*
- * we are not associated yet, the only timer that could be
- * running is the timeout for the association response which
- * which is not relevant anymore.
- */
- del_timer_sync(&sdata->u.mgd.timer);
- sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
-
- eth_zero_addr(sdata->u.mgd.bssid);
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
- sdata->u.mgd.flags = 0;
- mutex_lock(&sdata->local->mtx);
- ieee80211_vif_release_channel(sdata);
- mutex_unlock(&sdata->local->mtx);
- }
-
- kfree(assoc_data);
- sdata->u.mgd.assoc_data = NULL;
-}
-
static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss,
struct ieee80211_mgmt *mgmt, size_t len)
@@ -3299,7 +3337,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
ifmgd->have_beacon = true;
ifmgd->assoc_data->need_beacon = false;
- if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
+ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
sdata->vif.bss_conf.sync_tsf =
le64_to_cpu(mgmt->u.beacon.timestamp);
sdata->vif.bss_conf.sync_device_ts =
@@ -3405,7 +3443,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
len - baselen, false, &elems,
care_about_ies, ncrc);
- if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
+ if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) {
bool directed_tim = ieee80211_check_tim(elems.tim,
elems.tim_len,
ifmgd->aid);
@@ -3473,7 +3511,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
* the driver will use them. The synchronized view is currently
* guaranteed only in certain callbacks.
*/
- if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
+ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
sdata->vif.bss_conf.sync_tsf =
le64_to_cpu(mgmt->u.beacon.timestamp);
sdata->vif.bss_conf.sync_device_ts =
@@ -3711,7 +3749,7 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
auth_data->expected_transaction = trans;
}
- if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
@@ -3784,7 +3822,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
IEEE80211_ASSOC_MAX_TRIES);
ieee80211_send_assoc(sdata);
- if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+ if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
assoc_data->timeout_started = true;
run_again(sdata, assoc_data->timeout);
@@ -3898,7 +3936,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
- if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
max_tries = max_nullfunc_tries;
else
max_tries = max_probe_tries;
@@ -3923,7 +3961,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
}
} else if (time_is_after_jiffies(ifmgd->probe_timeout))
run_again(sdata, ifmgd->probe_timeout);
- else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+ else if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
mlme_dbg(sdata,
"Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
bssid, probe_wait_ms);
@@ -3992,14 +4030,11 @@ static void ieee80211_sta_monitor_work(struct work_struct *work)
static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
{
- u32 flags;
-
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
__ieee80211_stop_poll(sdata);
/* let's probe the connection once */
- flags = sdata->local->hw.flags;
- if (!(flags & IEEE80211_HW_CONNECTION_MONITOR))
+ if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
ieee80211_queue_work(&sdata->local->hw,
&sdata->u.mgd.monitor_work);
/* and do all the other regular work too */
@@ -4307,15 +4342,15 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_bss *cbss, bool assoc)
+ struct cfg80211_bss *cbss, bool assoc,
+ bool override)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_bss *bss = (void *)cbss->priv;
struct sta_info *new_sta = NULL;
struct ieee80211_supported_band *sband;
- struct ieee80211_sta_ht_cap sta_ht_cap;
- bool have_sta = false, is_override = false;
+ bool have_sta = false;
int err;
sband = local->hw.wiphy->bands[cbss->channel->band];
@@ -4335,14 +4370,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
return -ENOMEM;
}
- memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
- ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
-
- is_override = (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) !=
- (sband->ht_cap.cap &
- IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-
- if (new_sta || is_override) {
+ if (new_sta || override) {
err = ieee80211_prep_channel(sdata, cbss);
if (err) {
if (new_sta)
@@ -4419,8 +4447,8 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.sync_dtim_count = tim_ie[2];
else
sdata->vif.bss_conf.sync_dtim_count = 0;
- } else if (!(local->hw.flags &
- IEEE80211_HW_TIMING_BEACON_ONLY)) {
+ } else if (!ieee80211_hw_check(&sdata->local->hw,
+ TIMING_BEACON_ONLY)) {
ies = rcu_dereference(cbss->proberesp_ies);
/* must be non-NULL since beacon IEs were NULL */
sdata->vif.bss_conf.sync_tsf = ies->tsf;
@@ -4552,7 +4580,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
- err = ieee80211_prep_connection(sdata, req->bss, false);
+ err = ieee80211_prep_connection(sdata, req->bss, false, false);
if (err)
goto err_clear;
@@ -4570,6 +4598,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
eth_zero_addr(ifmgd->bssid);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
ifmgd->auth_data = NULL;
+ mutex_lock(&sdata->local->mtx);
+ ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&sdata->local->mtx);
err_free:
kfree(auth_data);
return err;
@@ -4624,6 +4655,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband;
const u8 *ssidie, *ht_ie, *vht_ie;
int i, err;
+ bool override = false;
assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL);
if (!assoc_data)
@@ -4728,14 +4760,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
}
}
- if (req->flags & ASSOC_REQ_DISABLE_HT) {
- ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
- ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
- }
-
- if (req->flags & ASSOC_REQ_DISABLE_VHT)
- ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
-
/* Also disable HT if we don't support it or the AP doesn't use WMM */
sband = local->hw.wiphy->bands[req->bss->channel->band];
if (!sband->ht_cap.ht_supported ||
@@ -4802,7 +4826,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) &&
- (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
+ ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK),
"U-APSD not supported with HW_PS_NULLFUNC_STACK\n"))
sdata->vif.driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
@@ -4847,14 +4871,43 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
ifmgd->dtim_period = 0;
ifmgd->have_beacon = false;
- err = ieee80211_prep_connection(sdata, req->bss, true);
+ /* override HT/VHT configuration only if the AP and we support it */
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
+ struct ieee80211_sta_ht_cap sta_ht_cap;
+
+ if (req->flags & ASSOC_REQ_DISABLE_HT)
+ override = true;
+
+ memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
+ ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+
+ /* check for 40 MHz disable override */
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ) &&
+ sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+ !(sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ override = true;
+
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
+ req->flags & ASSOC_REQ_DISABLE_VHT)
+ override = true;
+ }
+
+ if (req->flags & ASSOC_REQ_DISABLE_HT) {
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
+ ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+ }
+
+ if (req->flags & ASSOC_REQ_DISABLE_VHT)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+
+ err = ieee80211_prep_connection(sdata, req->bss, true, override);
if (err)
goto err_clear;
rcu_read_lock();
beacon_ies = rcu_dereference(req->bss->beacon_ies);
- if (sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC &&
+ if (ieee80211_hw_check(&sdata->local->hw, NEED_DTIM_BEFORE_ASSOC) &&
!beacon_ies) {
/*
* Wait up to one beacon interval ...
@@ -4881,7 +4934,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
assoc_data->timeout = jiffies;
assoc_data->timeout_started = true;
- if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
+ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
sdata->vif.bss_conf.sync_tsf = beacon_ies->tsf;
sdata->vif.bss_conf.sync_device_ts =
bss->device_ts_beacon;
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 683f0e3cb124..f2c75cf491fc 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -46,7 +46,7 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
}
if (!local->offchannel_ps_enabled ||
- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+ !ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
/*
* If power save was enabled, no need to send a nullfunc
* frame because AP knows that we are sleeping. But if the
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index ac6ad6238e3a..06b60980c62c 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -23,7 +23,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
ieee80211_del_virtual_monitor(local);
- if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) {
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
@@ -82,7 +82,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
if (err < 0) {
local->quiescing = false;
local->wowlan = false;
- if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) {
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta,
&local->sta_list, list) {
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index d53355b011f5..36ba7c4f0283 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -680,12 +680,18 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
info->control.rates[i].count = 0;
}
- if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+ if (ieee80211_hw_check(&sdata->local->hw, HAS_RATE_CONTROL))
return;
- ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
+ if (ista) {
+ spin_lock_bh(&sta->rate_ctrl_lock);
+ ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
+ spin_unlock_bh(&sta->rate_ctrl_lock);
+ } else {
+ ref->ops->get_rate(ref->priv, NULL, NULL, txrc);
+ }
- if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
+ if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_RC_TABLE))
return;
ieee80211_get_tx_rates(&sdata->vif, ista, txrc->skb,
@@ -727,7 +733,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
if (local->open_count)
return -EBUSY;
- if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
+ if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
if (WARN_ON(!local->ops->set_rts_threshold))
return -EINVAL;
return 0;
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 38652f09feaf..25c9be5dd7fd 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -42,10 +42,12 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
return;
+ spin_lock_bh(&sta->rate_ctrl_lock);
if (ref->ops->tx_status)
ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
else
ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info);
+ spin_unlock_bh(&sta->rate_ctrl_lock);
}
static inline void
@@ -64,7 +66,9 @@ rate_control_tx_status_noskb(struct ieee80211_local *local,
if (WARN_ON_ONCE(!ref->ops->tx_status_noskb))
return;
+ spin_lock_bh(&sta->rate_ctrl_lock);
ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info);
+ spin_unlock_bh(&sta->rate_ctrl_lock);
}
static inline void rate_control_rate_init(struct sta_info *sta)
@@ -91,8 +95,10 @@ static inline void rate_control_rate_init(struct sta_info *sta)
sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
+ spin_lock_bh(&sta->rate_ctrl_lock);
ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
priv_sta);
+ spin_unlock_bh(&sta->rate_ctrl_lock);
rcu_read_unlock();
set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
}
@@ -115,18 +121,20 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
return;
}
+ spin_lock_bh(&sta->rate_ctrl_lock);
ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
ista, priv_sta, changed);
+ spin_unlock_bh(&sta->rate_ctrl_lock);
rcu_read_unlock();
}
drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
}
static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
- struct ieee80211_sta *sta,
- gfp_t gfp)
+ struct sta_info *sta, gfp_t gfp)
{
- return ref->ops->alloc_sta(ref->priv, sta, gfp);
+ spin_lock_init(&sta->rate_ctrl_lock);
+ return ref->ops->alloc_sta(ref->priv, &sta->sta, gfp);
}
static inline void rate_control_free_sta(struct sta_info *sta)
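
rate.h now wraps every rate-control callback (tx_status, rate_init, rate_update, get_rate) in a per-station rate_ctrl_lock, initialized in rate_control_alloc_sta, so TX-status processing and rate lookups for the same station can no longer race inside the algorithm's private per-STA state. A minimal sketch of the pattern (hypothetical struct, assuming BH context as in the patch):

    #include <linux/spinlock.h>

    struct my_sta {
            spinlock_t rate_ctrl_lock;      /* guards rc_priv */
            void *rc_priv;                  /* algorithm's per-STA state */
    };

    static void my_sta_alloc(struct my_sta *sta)
    {
            spin_lock_init(&sta->rate_ctrl_lock);
    }

    static void my_tx_status(struct my_sta *sta)
    {
            /* _bh: these callbacks run from softirq-adjacent paths */
            spin_lock_bh(&sta->rate_ctrl_lock);
            /* ... update rc_priv statistics ... */
            spin_unlock_bh(&sta->rate_ctrl_lock);
    }
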
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 7430a1df2ab1..543b67233535 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -1070,7 +1070,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
if (sband->band != IEEE80211_BAND_2GHZ)
return;
- if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
+ if (!ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES))
return;
mi->cck_supported = 0;
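
The rx.c diff below adds an ieee80211_rx_stats() helper that accounts RX packets and bytes in per-CPU pcpu_sw_netstats under a u64_stats seqcount instead of bumping the shared dev->stats counters, avoiding cross-CPU cache-line bouncing and giving tear-free 64-bit reads on 32-bit systems. The same update-side pattern in isolation (assuming dev->tstats was allocated at netdev setup, which this series presumably does elsewhere):

    #include <linux/netdevice.h>
    #include <linux/u64_stats_sync.h>

    static void dev_rx_account(struct net_device *dev, u32 len)
    {
            /* this CPU's slot: no atomics on the fast path */
            struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

            u64_stats_update_begin(&tstats->syncp); /* readers retry */
            tstats->rx_packets++;
            tstats->rx_bytes += len;
            u64_stats_update_end(&tstats->syncp);
    }
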
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5793f75c5ffd..5dae166cb7f5 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -32,6 +32,16 @@
#include "wme.h"
#include "rate.h"
+static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
+{
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->rx_packets++;
+ tstats->rx_bytes += len;
+ u64_stats_update_end(&tstats->syncp);
+}
+
/*
* monitor mode reception
*
@@ -42,7 +52,7 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
struct sk_buff *skb,
unsigned int rtap_vendor_space)
{
- if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
+ if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
if (likely(skb->len > FCS_LEN))
__pskb_trim(skb, skb->len - FCS_LEN);
else {
@@ -100,7 +110,7 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
len = ALIGN(len, 8);
len += 8;
}
- if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+ if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
len += 1;
/* antenna field, if we don't have per-chain info */
@@ -175,7 +185,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
}
mpdulen = skb->len;
- if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)))
+ if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
mpdulen += FCS_LEN;
rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
@@ -229,7 +239,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
}
/* IEEE80211_RADIOTAP_FLAGS */
- if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
+ if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
*pos |= IEEE80211_RADIOTAP_F_FCS;
if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
*pos |= IEEE80211_RADIOTAP_F_BADFCS;
@@ -279,7 +289,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos += 2;
/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
- if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM &&
+ if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
*pos = status->signal;
rthdr->it_present |=
@@ -448,7 +458,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
* the SKB because it has a bad FCS/PLCP checksum.
*/
- if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
+ if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
present_fcs_len = FCS_LEN;
/* ensure hdr->frame_control and vendor radiotap data are in skb head */
@@ -529,8 +539,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
}
prev_dev = sdata->dev;
- sdata->dev->stats.rx_packets++;
- sdata->dev->stats.rx_bytes += skb->len;
+ ieee80211_rx_stats(sdata->dev, skb->len);
}
if (prev_dev) {
@@ -981,7 +990,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
struct sk_buff *skb = rx->skb;
struct ieee80211_local *local = rx->local;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct sta_info *sta = rx->sta;
struct tid_ampdu_rx *tid_agg_rx;
u16 sc;
@@ -1016,10 +1024,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
goto dont_reorder;
- /* not actually part of this BA session */
- if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
- goto dont_reorder;
-
/* new, potentially un-ordered, ampdu frame - process it */
/* reset session timer */
@@ -1073,10 +1077,8 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->seqno_idx] ==
hdr->seq_ctrl)) {
- if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
- rx->local->dot11FrameDuplicateCount++;
- rx->sta->num_duplicates++;
- }
+ I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
+ rx->sta->num_duplicates++;
return RX_DROP_UNUSABLE;
} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
@@ -1195,11 +1197,13 @@ static void sta_ps_start(struct sta_info *sta)
atomic_inc(&ps->num_sta_ps);
set_sta_flag(sta, WLAN_STA_PS_STA);
- if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+ if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
sta->sta.addr, sta->sta.aid);
+ ieee80211_clear_fast_xmit(sta);
+
if (!sta->sta.txq[0])
return;
@@ -1241,7 +1245,7 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
bool in_ps;
- WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
+ WARN_ON(!ieee80211_hw_check(&sta_inf->local->hw, AP_LINK_PS));
/* Don't let the same PS state be set twice */
in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
@@ -1265,7 +1269,7 @@ ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
int tid, ac;
- if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ if (!rx->sta)
return RX_CONTINUE;
if (sdata->vif.type != NL80211_IFTYPE_AP &&
@@ -1277,7 +1281,7 @@ ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
* uAPSD and PS-Poll frames (the latter shouldn't even come up from
* it to mac80211 since they're handled.)
*/
- if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+ if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
return RX_CONTINUE;
/*
@@ -1367,11 +1371,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
}
}
} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
- u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
- NL80211_IFTYPE_OCB);
- /* OCB uses wild-card BSSID */
- if (is_broadcast_ether_addr(bssid))
- sta->last_rx = jiffies;
+ sta->last_rx = jiffies;
} else if (!is_multicast_ether_addr(hdr->addr1)) {
/*
* Mesh beacons will update last_rx if they are found to
@@ -1386,9 +1386,6 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
}
}
- if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
- return RX_CONTINUE;
-
if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_sta_rx_notify(rx->sdata, hdr);
@@ -1416,7 +1413,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
* Change STA power saving mode only at the end of a frame
* exchange sequence.
*/
- if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
+ if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -1517,13 +1514,6 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
* possible.
*/
- /*
- * No point in finding a key and decrypting if the frame is neither
- * addressed to us nor a multicast frame.
- */
- if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
- return RX_CONTINUE;
-
/* start without a key */
rx->key = NULL;
fc = hdr->frame_control;
@@ -1795,7 +1785,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
frag = sc & IEEE80211_SCTL_FRAG;
if (is_multicast_ether_addr(hdr->addr1)) {
- rx->local->dot11MulticastReceivedFrameCount++;
+ I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
goto out_no_led;
}
@@ -1878,7 +1868,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
rx->skb = __skb_dequeue(&entry->skb_list);
if (skb_tailroom(rx->skb) < entry->extra_len) {
- I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
+ I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
GFP_ATOMIC))) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
@@ -2054,18 +2044,15 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
struct sk_buff *skb, *xmit_skb;
struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
struct sta_info *dsta;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += rx->skb->len;
skb = rx->skb;
xmit_skb = NULL;
+ ieee80211_rx_stats(dev, skb->len);
+
if ((sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
- (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
if (is_multicast_ether_addr(ehdr->h_dest)) {
/*
@@ -2207,7 +2194,6 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
struct sk_buff *skb = rx->skb, *fwd_skb;
struct ieee80211_local *local = rx->local;
struct ieee80211_sub_if_data *sdata = rx->sdata;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u16 q, hdrlen;
@@ -2238,8 +2224,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
return RX_DROP_MONITOR;
- if (!ieee80211_is_data(hdr->frame_control) ||
- !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ if (!ieee80211_is_data(hdr->frame_control))
return RX_CONTINUE;
if (!mesh_hdr->ttl)
@@ -2330,11 +2315,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
ieee80211_add_pending_skb(local, fwd_skb);
out:
- if (is_multicast_ether_addr(hdr->addr1) ||
- sdata->dev->flags & IFF_PROMISC)
+ if (is_multicast_ether_addr(hdr->addr1))
return RX_CONTINUE;
- else
- return RX_DROP_MONITOR;
+ return RX_DROP_MONITOR;
}
#endif
@@ -2445,6 +2428,9 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
struct {
__le16 control, start_seq_num;
} __packed bar_data;
+ struct ieee80211_event event = {
+ .type = BAR_RX_EVENT,
+ };
if (!rx->sta)
return RX_DROP_MONITOR;
@@ -2460,6 +2446,9 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
return RX_DROP_MONITOR;
start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
+ event.u.ba.tid = tid;
+ event.u.ba.ssn = start_seq_num;
+ event.u.ba.sta = &rx->sta->sta;
/* reset session timer */
if (tid_agg_rx->timeout)
@@ -2472,6 +2461,8 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
start_seq_num, frames);
spin_unlock(&tid_agg_rx->reorder_lock);
+ drv_event_callback(rx->local, rx->sdata, &event);
+
kfree_skb(skb);
return RX_QUEUED;
}
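The BAR_RX_EVENT plumbing depends on the driver event interface in include/net/mac80211.h; the pieces used here look approximately like this (field names inferred from the usage above):

	struct ieee80211_ba_event {
		struct ieee80211_sta *sta;
		u16 tid;
		u16 ssn;
	};

	struct ieee80211_event {
		enum ieee80211_event_type type;	/* BAR_RX_EVENT, BA_FRAME_TIMEOUT, ... */
		union {
			struct ieee80211_rssi_event rssi;
			struct ieee80211_mlme_event mlme;
			struct ieee80211_ba_event ba;
		} u;
	};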
@@ -2552,7 +2543,7 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
!(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
int sig = 0;
- if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+ if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
sig = status->signal;
cfg80211_report_obss_beacon(rx->local->hw.wiphy,
@@ -2561,9 +2552,6 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
rx->flags |= IEEE80211_RX_BEACON_REPORTED;
}
- if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
- return RX_DROP_MONITOR;
-
if (ieee80211_drop_unencrypted_mgmt(rx))
return RX_DROP_UNUSABLE;
@@ -2591,9 +2579,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
return RX_DROP_UNUSABLE;
- if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
- return RX_DROP_UNUSABLE;
-
switch (mgmt->u.action.category) {
case WLAN_CATEGORY_HT:
/* reject HT action frames from stations not supporting HT */
@@ -2889,7 +2874,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
* it transmitted were processed or returned.
*/
- if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+ if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM))
sig = status->signal;
if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
@@ -2954,7 +2939,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
IEEE80211_TX_CTL_NO_CCK_RATE;
- if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+ if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
info->hw_queue =
local->hw.offchannel_tx_hw_queue;
}
@@ -3077,8 +3062,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
}
prev_dev = sdata->dev;
- sdata->dev->stats.rx_packets++;
- sdata->dev->stats.rx_bytes += skb->len;
+ ieee80211_rx_stats(sdata->dev, skb->len);
}
if (prev_dev) {
@@ -3246,16 +3230,25 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
spin_unlock(&tid_agg_rx->reorder_lock);
+ if (!skb_queue_empty(&frames)) {
+ struct ieee80211_event event = {
+ .type = BA_FRAME_TIMEOUT,
+ .u.ba.tid = tid,
+ .u.ba.sta = &sta->sta,
+ };
+ drv_event_callback(rx.local, rx.sdata, &event);
+ }
+
ieee80211_rx_handlers(&rx, &frames);
}
/* main receive path */
-static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
- struct ieee80211_hdr *hdr)
+static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct sk_buff *skb = rx->skb;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
int multicast = is_multicast_ether_addr(hdr->addr1);
@@ -3264,30 +3257,23 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
case NL80211_IFTYPE_STATION:
if (!bssid && !sdata->u.mgd.use_4addr)
return false;
- if (!multicast &&
- !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
- if (!(sdata->dev->flags & IFF_PROMISC) ||
- sdata->u.mgd.use_4addr)
- return false;
- status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
- }
- break;
+ if (multicast)
+ return true;
+ return ether_addr_equal(sdata->vif.addr, hdr->addr1);
case NL80211_IFTYPE_ADHOC:
if (!bssid)
return false;
if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
return false;
- if (ieee80211_is_beacon(hdr->frame_control)) {
+ if (ieee80211_is_beacon(hdr->frame_control))
return true;
- } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
+ if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
return false;
- } else if (!multicast &&
- !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
- if (!(sdata->dev->flags & IFF_PROMISC))
- return false;
- status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
- } else if (!rx->sta) {
+ if (!multicast &&
+ !ether_addr_equal(sdata->vif.addr, hdr->addr1))
+ return false;
+ if (!rx->sta) {
int rate_idx;
if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
rate_idx = 0; /* TODO: HT/VHT rates */
@@ -3296,25 +3282,18 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
BIT(rate_idx));
}
- break;
+ return true;
case NL80211_IFTYPE_OCB:
if (!bssid)
return false;
- if (ieee80211_is_beacon(hdr->frame_control)) {
+ if (ieee80211_is_beacon(hdr->frame_control))
return false;
- } else if (!is_broadcast_ether_addr(bssid)) {
- ocb_dbg(sdata, "BSSID mismatch in OCB mode!\n");
+ if (!is_broadcast_ether_addr(bssid))
return false;
- } else if (!multicast &&
- !ether_addr_equal(sdata->dev->dev_addr,
- hdr->addr1)) {
- /* if we are in promisc mode we also accept
- * packets not destined for us
- */
- if (!(sdata->dev->flags & IFF_PROMISC))
- return false;
- rx->flags &= ~IEEE80211_RX_RA_MATCH;
- } else if (!rx->sta) {
+ if (!multicast &&
+ !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
+ return false;
+ if (!rx->sta) {
int rate_idx;
if (status->flag & RX_FLAG_HT)
rate_idx = 0; /* TODO: HT rates */
@@ -3323,22 +3302,17 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
BIT(rate_idx));
}
- break;
+ return true;
case NL80211_IFTYPE_MESH_POINT:
- if (!multicast &&
- !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
- if (!(sdata->dev->flags & IFF_PROMISC))
- return false;
-
- status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
- }
- break;
+ if (multicast)
+ return true;
+ return ether_addr_equal(sdata->vif.addr, hdr->addr1);
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_AP:
- if (!bssid) {
- if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
- return false;
- } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
+ if (!bssid)
+ return ether_addr_equal(sdata->vif.addr, hdr->addr1);
+
+ if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
/*
* Accept public action frames even when the
* BSSID doesn't match, this is used for P2P
@@ -3350,10 +3324,10 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
return false;
if (ieee80211_is_public_action(hdr, skb->len))
return true;
- if (!ieee80211_is_beacon(hdr->frame_control))
- return false;
- status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
- } else if (!ieee80211_has_tods(hdr->frame_control)) {
+ return ieee80211_is_beacon(hdr->frame_control);
+ }
+
+ if (!ieee80211_has_tods(hdr->frame_control)) {
/* ignore data frames to TDLS-peers */
if (ieee80211_is_data(hdr->frame_control))
return false;
@@ -3362,30 +3336,22 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
!ether_addr_equal(bssid, hdr->addr1))
return false;
}
- break;
+ return true;
case NL80211_IFTYPE_WDS:
if (bssid || !ieee80211_is_data(hdr->frame_control))
return false;
- if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
- return false;
- break;
+ return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
case NL80211_IFTYPE_P2P_DEVICE:
- if (!ieee80211_is_public_action(hdr, skb->len) &&
- !ieee80211_is_probe_req(hdr->frame_control) &&
- !ieee80211_is_probe_resp(hdr->frame_control) &&
- !ieee80211_is_beacon(hdr->frame_control))
- return false;
- if (!ether_addr_equal(sdata->vif.addr, hdr->addr1) &&
- !multicast)
- status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
- break;
+ return ieee80211_is_public_action(hdr, skb->len) ||
+ ieee80211_is_probe_req(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control);
default:
- /* should never get here */
- WARN_ON_ONCE(1);
break;
}
- return true;
+ WARN_ON_ONCE(1);
+ return false;
}
/*
@@ -3399,13 +3365,10 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
{
struct ieee80211_local *local = rx->local;
struct ieee80211_sub_if_data *sdata = rx->sdata;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_hdr *hdr = (void *)skb->data;
rx->skb = skb;
- status->rx_flags |= IEEE80211_RX_RA_MATCH;
- if (!prepare_for_handlers(rx, hdr))
+ if (!ieee80211_accept_frame(rx))
return false;
if (!consume) {
@@ -3448,7 +3411,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
rx.local = local;
if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
- local->dot11ReceivedFragmentCount++;
+ I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
if (ieee80211_is_mgmt(fc)) {
/* drop frame if too short for header */
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 7bb6a9383f58..11d0901ebb7b 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -6,7 +6,7 @@
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2013-2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -69,10 +69,11 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
int clen, srlen;
enum nl80211_bss_scan_width scan_width;
s32 signal = 0;
+ bool signal_valid;
- if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+ if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
signal = rx_status->signal * 100;
- else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
+ else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC))
signal = (rx_status->signal * 100) / local->hw.max_signal;
scan_width = NL80211_BSS_CHAN_WIDTH_20;
@@ -86,6 +87,11 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
GFP_ATOMIC);
if (!cbss)
return NULL;
+ /* In case the signal is invalid, flag it in the RX status */
+ signal_valid = abs(channel->center_freq - cbss->channel->center_freq)
+ <= local->hw.wiphy->max_adj_channel_rssi_comp;
+ if (!signal_valid)
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
bss = (void *)cbss->priv;
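Worked example of the validity check: with max_adj_channel_rssi_comp = 5 MHz, a BSS entry on 2412 MHz updated from a frame received while tuned to 2417 MHz (|2417 - 2412| = 5 <= 5) keeps a usable signal value, whereas the same update from a frame received on 2437 MHz (offset 25 MHz) gets RX_FLAG_NO_SIGNAL_VAL set.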
@@ -257,7 +263,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
return false;
- if (local->hw.flags & IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS) {
+ if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) {
for (i = 0; i < req->n_channels; i++) {
local->hw_scan_req->req.channels[i] = req->channels[i];
bands_used |= BIT(req->channels[i]->band);
@@ -326,7 +332,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
return;
if (hw_scan && !aborted &&
- !(local->hw.flags & IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS) &&
+ !ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS) &&
ieee80211_prep_hw_scan(local)) {
int rc;
@@ -520,7 +526,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
local->hw_scan_ies_bufsize = local->scan_ies_len + req->ie_len;
- if (local->hw.flags & IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS) {
+ if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) {
int i, n_bands = 0;
u8 bands_counted = 0;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 2880f2ae99ab..666ddac3c87c 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -71,6 +71,7 @@ static const struct rhashtable_params sta_rht_params = {
.key_offset = offsetof(struct sta_info, sta.addr),
.key_len = ETH_ALEN,
.hashfn = sta_addr_hash,
+ .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
};
/* Caller must hold local->sta_mtx */
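CONFIG_MAC80211_STA_HASH_MAX_SIZE caps the rhashtable's growth, mainly so hash collisions can be forced during testing. The Kconfig entry is added outside this excerpt; presumably something along these lines:

	config MAC80211_STA_HASH_MAX_SIZE
		int "Station hash table maximum size" if MAC80211_DEBUG_MENU
		default 0
		---help---
		  Setting this option to a low value (e.g. 4) allows testing the
		  hash table with collisions relatively deterministically (just
		  connect more stations than the number selected here.)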
@@ -281,12 +282,12 @@ static void sta_deliver_ps_frames(struct work_struct *wk)
static int sta_prepare_rate_control(struct ieee80211_local *local,
struct sta_info *sta, gfp_t gfp)
{
- if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+ if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
return 0;
sta->rate_ctrl = local->rate_ctrl;
sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
- &sta->sta, gfp);
+ sta, gfp);
if (!sta->rate_ctrl_priv)
return -ENOMEM;
@@ -312,6 +313,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
mutex_init(&sta->ampdu_mlme.mtx);
#ifdef CONFIG_MAC80211_MESH
+ spin_lock_init(&sta->plink_lock);
if (ieee80211_vif_is_mesh(&sdata->vif) &&
!sdata->u.mesh.user_mpm)
init_timer(&sta->plink_timer);
@@ -641,7 +643,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
}
/* No need to do anything if the driver does all */
- if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+ if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
return;
if (sta->dead)
@@ -1146,7 +1148,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
sta->driver_buffered_tids = 0;
sta->txq_buffered_tids = 0;
- if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+ if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
if (sta->sta.txq[0]) {
@@ -1217,6 +1219,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
ps_dbg(sdata,
"STA %pM aid %d sending %d filtered/%d PS frames since STA not sleeping anymore\n",
sta->sta.addr, sta->sta.aid, filtered, buffered);
+
+ ieee80211_check_fast_xmit(sta);
}
static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
@@ -1615,6 +1619,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
if (block) {
set_sta_flag(sta, WLAN_STA_PS_DRIVER);
+ ieee80211_clear_fast_xmit(sta);
return;
}
@@ -1632,6 +1637,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &sta->drv_deliver_wk);
} else {
clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+ ieee80211_check_fast_xmit(sta);
}
}
EXPORT_SYMBOL(ieee80211_sta_block_awake);
@@ -1736,6 +1742,7 @@ int sta_info_move_state(struct sta_info *sta,
!sta->sdata->u.vlan.sta))
atomic_dec(&sta->sdata->bss->num_mcast_sta);
clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+ ieee80211_clear_fast_xmit(sta);
}
break;
case IEEE80211_STA_AUTHORIZED:
@@ -1745,6 +1752,7 @@ int sta_info_move_state(struct sta_info *sta,
!sta->sdata->u.vlan.sta))
atomic_inc(&sta->sdata->bss->num_mcast_sta);
set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+ ieee80211_check_fast_xmit(sta);
}
break;
default:
@@ -1871,8 +1879,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->rx_beacon_signal_avg = ieee80211_ave_rssi(&sdata->vif);
}
- if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
- (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
+ if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
+ ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
sinfo->signal = (s8)sta->last_signal;
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
@@ -1924,7 +1932,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
if (!(tidstats->filled &
BIT(NL80211_TID_STATS_TX_MSDU_RETRIES)) &&
- local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+ ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |=
BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
tidstats->tx_msdu_retries = sta->tx_msdu_retries[i];
@@ -1932,7 +1940,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
if (!(tidstats->filled &
BIT(NL80211_TID_STATS_TX_MSDU_FAILED)) &&
- local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+ ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
tidstats->filled |=
BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
tidstats->tx_msdu_failed = sta->tx_msdu_failed[i];
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5c164fb3f6c5..226f8ca47ad6 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -241,6 +241,34 @@ struct sta_ampdu_mlme {
/* Value to indicate no TID reservation */
#define IEEE80211_TID_UNRESERVED 0xff
+#define IEEE80211_FAST_XMIT_MAX_IV 18
+
+/**
+ * struct ieee80211_fast_tx - TX fastpath information
+ * @key: key to use for hw crypto
+ * @hdr: the 802.11 header to put with the frame
+ * @hdr_len: actual 802.11 header length
+ * @sa_offs: offset of the SA
+ * @da_offs: offset of the DA
+ * @pn_offs: offset where to put PN for crypto (or 0 if not needed)
+ * @band: band this will be transmitted on, for tx_info
+ * @rcu_head: RCU head to free this struct
+ *
+ * This struct is small enough that, in the common case (a crypto header
+ * of at most 8 bytes, as for CCMP/GCMP), it fits into a single 64-byte
+ * cache line.
+ */
+struct ieee80211_fast_tx {
+ struct ieee80211_key *key;
+ u8 hdr_len;
+ u8 sa_offs, da_offs, pn_offs;
+ u8 band;
+ u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
+ sizeof(rfc1042_header)];
+
+ struct rcu_head rcu_head;
+};
+
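The hdr[] sizing expression above decodes as follows:

	/*
	 *   30 bytes: largest 802.11 data header (4-address format)
	 * +  2 bytes: QoS control field
	 * + 18 bytes: IEEE80211_FAST_XMIT_MAX_IV, the largest IV/PN space
	 * +  6 bytes: sizeof(rfc1042_header), the SNAP header appended
	 *             after the 802.11 header by ieee80211_check_fast_xmit()
	 * = 56 bytes of pre-built header
	 */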
/**
* struct sta_info - STA information
*
@@ -257,6 +285,8 @@ struct sta_ampdu_mlme {
* @gtk: group keys negotiated with this station, if any
* @gtk_idx: last installed group key index
* @rate_ctrl: rate control algorithm reference
+ * @rate_ctrl_lock: spinlock used to protect rate control data
+ * (data inside the algorithm, so serializes calls there)
* @rate_ctrl_priv: rate control private per-STA pointer
* @last_tx_rate: rate used for last transmit, to report to userspace as
* "the" transmit rate
@@ -295,10 +325,10 @@ struct sta_ampdu_mlme {
* @fail_avg: moving percentage of failed MSDUs
* @tx_packets: number of MSDUs transmitted to this STA
* @tx_bytes: number of bytes transmitted to this STA
- * @tx_fragments: number of transmitted MPDUs
* @tid_seq: per-TID sequence numbers for sending to this STA
* @ampdu_mlme: A-MPDU state machine state
* @timer_to_tid: identity mapping to ID timers
+ * @plink_lock: serialize access to plink fields
* @llid: Local link ID
* @plid: Peer link ID
* @reason: Cancel reason on PLINK_HOLDING state
@@ -338,6 +368,9 @@ struct sta_ampdu_mlme {
* using IEEE80211_NUM_TID entry for non-QoS frames
* @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
* entry for non-QoS frames
+ * @fast_tx: TX fastpath information
+ * @processed_beacon: set to true after peer rates and capabilities are
+ * processed
*/
struct sta_info {
/* General information, mostly static */
@@ -352,8 +385,11 @@ struct sta_info {
u8 ptk_idx;
struct rate_control_ref *rate_ctrl;
void *rate_ctrl_priv;
+ spinlock_t rate_ctrl_lock;
spinlock_t lock;
+ struct ieee80211_fast_tx __rcu *fast_tx;
+
struct work_struct drv_deliver_wk;
u16 listen_interval;
@@ -400,7 +436,6 @@ struct sta_info {
unsigned int fail_avg;
/* Updated from TX path only, no locking requirements */
- u32 tx_fragments;
u64 tx_packets[IEEE80211_NUM_ACS];
u64 tx_bytes[IEEE80211_NUM_ACS];
struct ieee80211_tx_rate last_tx_rate;
@@ -422,9 +457,10 @@ struct sta_info {
#ifdef CONFIG_MAC80211_MESH
/*
- * Mesh peer link attributes
+ * Mesh peer link attributes, protected by plink_lock.
* TODO: move to a sub-structure that is referenced with pointer?
*/
+ spinlock_t plink_lock;
u16 llid;
u16 plid;
u16 reason;
@@ -432,12 +468,14 @@ struct sta_info {
enum nl80211_plink_state plink_state;
u32 plink_timeout;
struct timer_list plink_timer;
+
s64 t_offset;
s64 t_offset_setpoint;
/* mesh power save */
enum nl80211_mesh_power_mode local_pm;
enum nl80211_mesh_power_mode peer_pm;
enum nl80211_mesh_power_mode nonpeer_pm;
+ bool processed_beacon;
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 005fdbe39a8b..45628f37c083 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -181,7 +181,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
sta->last_rx = jiffies;
if (ieee80211_is_data_qos(mgmt->frame_control)) {
@@ -414,8 +414,7 @@ static void ieee80211_tdls_td_tx_handle(struct ieee80211_local *local,
if (is_teardown) {
/* This mechanism relies on being able to get ACKs */
- WARN_ON(!(local->hw.flags &
- IEEE80211_HW_REPORTS_TX_ACK_STATUS));
+ WARN_ON(!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS));
/* Check if peer has ACKed */
if (flags & IEEE80211_TX_STAT_ACK) {
@@ -429,6 +428,74 @@ static void ieee80211_tdls_td_tx_handle(struct ieee80211_local *local,
}
}
+static struct ieee80211_sub_if_data *
+ieee80211_sdata_from_skb(struct ieee80211_local *local, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ if (skb->dev) {
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (!sdata->dev)
+ continue;
+
+ if (skb->dev == sdata->dev)
+ return sdata;
+ }
+
+ return NULL;
+ }
+
+ return rcu_dereference(local->p2p_sdata);
+}
+
+static void ieee80211_report_ack_skb(struct ieee80211_local *local,
+ struct ieee80211_tx_info *info,
+ bool acked, bool dropped)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->ack_status_lock, flags);
+ skb = idr_find(&local->ack_status_frames, info->ack_frame_id);
+ if (skb)
+ idr_remove(&local->ack_status_frames, info->ack_frame_id);
+ spin_unlock_irqrestore(&local->ack_status_lock, flags);
+
+ if (!skb)
+ return;
+
+ if (dropped) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
+ u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ rcu_read_lock();
+ sdata = ieee80211_sdata_from_skb(local, skb);
+ if (sdata) {
+ if (ieee80211_is_nullfunc(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control))
+ cfg80211_probe_status(sdata->dev, hdr->addr1,
+ cookie, acked,
+ GFP_ATOMIC);
+ else
+ cfg80211_mgmt_tx_status(&sdata->wdev, cookie,
+ skb->data, skb->len,
+ acked, GFP_ATOMIC);
+ }
+ rcu_read_unlock();
+
+ dev_kfree_skb_any(skb);
+ } else {
+ /* consumes skb */
+ skb_complete_wifi_ack(skb, acked);
+ }
+}
+
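This is the consumer side of the ack-status IDR; the producer, in the TX path outside this excerpt, presumably stashes the companion skb and records the slot, roughly:

	/* assumed producer-side shape */
	spin_lock_irqsave(&local->ack_status_lock, spin_flags);
	id = idr_alloc(&local->ack_status_frames, ack_skb,
		       1, 0x10000, GFP_ATOMIC);
	spin_unlock_irqrestore(&local->ack_status_lock, spin_flags);

	if (id >= 0)
		info->ack_frame_id = id;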
static void ieee80211_report_used_skb(struct ieee80211_local *local,
struct sk_buff *skb, bool dropped)
{
@@ -439,28 +506,12 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
if (dropped)
acked = false;
- if (info->flags & (IEEE80211_TX_INTFL_NL80211_FRAME_TX |
- IEEE80211_TX_INTFL_MLME_CONN_TX)) {
- struct ieee80211_sub_if_data *sdata = NULL;
- struct ieee80211_sub_if_data *iter_sdata;
- u64 cookie = (unsigned long)skb;
+ if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
+ struct ieee80211_sub_if_data *sdata;
rcu_read_lock();
- if (skb->dev) {
- list_for_each_entry_rcu(iter_sdata, &local->interfaces,
- list) {
- if (!iter_sdata->dev)
- continue;
-
- if (skb->dev == iter_sdata->dev) {
- sdata = iter_sdata;
- break;
- }
- }
- } else {
- sdata = rcu_dereference(local->p2p_sdata);
- }
+ sdata = ieee80211_sdata_from_skb(local, skb);
if (!sdata) {
skb->dev = NULL;
@@ -478,38 +529,14 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
ieee80211_mgd_conn_tx_status(sdata,
hdr->frame_control,
acked);
- } else if (ieee80211_is_nullfunc(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control)) {
- cfg80211_probe_status(sdata->dev, hdr->addr1,
- cookie, acked, GFP_ATOMIC);
} else {
- cfg80211_mgmt_tx_status(&sdata->wdev, cookie, skb->data,
- skb->len, acked, GFP_ATOMIC);
+ /* we assign ack frame ID for the others */
+ WARN_ON(1);
}
rcu_read_unlock();
- }
-
- if (unlikely(info->ack_frame_id)) {
- struct sk_buff *ack_skb;
- unsigned long flags;
-
- spin_lock_irqsave(&local->ack_status_lock, flags);
- ack_skb = idr_find(&local->ack_status_frames,
- info->ack_frame_id);
- if (ack_skb)
- idr_remove(&local->ack_status_frames,
- info->ack_frame_id);
- spin_unlock_irqrestore(&local->ack_status_lock, flags);
-
- if (ack_skb) {
- if (!dropped) {
- /* consumes ack_skb */
- skb_complete_wifi_ack(ack_skb, acked);
- } else {
- dev_kfree_skb_any(ack_skb);
- }
- }
+ } else if (info->ack_frame_id) {
+ ieee80211_report_ack_skb(local, info, acked, dropped);
}
}
@@ -631,15 +658,15 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
}
if (acked || noack_success) {
- local->dot11TransmittedFrameCount++;
- if (!pubsta)
- local->dot11MulticastTransmittedFrameCount++;
- if (retry_count > 0)
- local->dot11RetryCount++;
- if (retry_count > 1)
- local->dot11MultipleRetryCount++;
+ I802_DEBUG_INC(local->dot11TransmittedFrameCount);
+ if (!pubsta)
+ I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
+ if (retry_count > 0)
+ I802_DEBUG_INC(local->dot11RetryCount);
+ if (retry_count > 1)
+ I802_DEBUG_INC(local->dot11MultipleRetryCount);
} else {
- local->dot11FailedCount++;
+ I802_DEBUG_INC(local->dot11FailedCount);
}
}
EXPORT_SYMBOL(ieee80211_tx_status_noskb);
@@ -703,7 +730,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
ieee80211_get_qos_ctl(hdr),
sta, true, acked);
- if ((local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) &&
+ if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
(ieee80211_is_data(hdr->frame_control)) &&
(rates_idx != -1))
sta->last_tx_rate = info->status.rates[rates_idx];
@@ -770,11 +797,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
ieee80211_frame_acked(sta, skb);
if ((sta->sdata->vif.type == NL80211_IFTYPE_STATION) &&
- (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
+ ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data,
acked, info->status.tx_time);
- if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+ if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
if (info->flags & IEEE80211_TX_STAT_ACK) {
if (sta->lost_packets)
sta->lost_packets = 0;
@@ -802,13 +829,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if ((info->flags & IEEE80211_TX_STAT_ACK) ||
(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED)) {
if (ieee80211_is_first_frag(hdr->seq_ctrl)) {
- local->dot11TransmittedFrameCount++;
+ I802_DEBUG_INC(local->dot11TransmittedFrameCount);
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
- local->dot11MulticastTransmittedFrameCount++;
+ I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
if (retry_count > 0)
- local->dot11RetryCount++;
+ I802_DEBUG_INC(local->dot11RetryCount);
if (retry_count > 1)
- local->dot11MultipleRetryCount++;
+ I802_DEBUG_INC(local->dot11MultipleRetryCount);
}
/* This counter shall be incremented for an acknowledged MPDU
@@ -818,14 +845,14 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!is_multicast_ether_addr(hdr->addr1) ||
ieee80211_is_data(fc) ||
ieee80211_is_mgmt(fc))
- local->dot11TransmittedFragmentCount++;
+ I802_DEBUG_INC(local->dot11TransmittedFragmentCount);
} else {
if (ieee80211_is_first_frag(hdr->seq_ctrl))
- local->dot11FailedCount++;
+ I802_DEBUG_INC(local->dot11FailedCount);
}
if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
- (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
+ ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS) &&
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
local->ps_sdata && !(local->scanning)) {
if (info->flags & IEEE80211_TX_STAT_ACK) {
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index fff0d864adfa..ad31b2dab4f5 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -167,23 +167,16 @@ static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb)
static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata,
u16 status_code)
{
- struct ieee80211_local *local = sdata->local;
- u16 capab;
-
/* The capability will be 0 when sending a failure code */
if (status_code != 0)
return 0;
- capab = 0;
- if (ieee80211_get_sdata_band(sdata) != IEEE80211_BAND_2GHZ)
- return capab;
-
- if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
- capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
- if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
- capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+ if (ieee80211_get_sdata_band(sdata) == IEEE80211_BAND_2GHZ) {
+ return WLAN_CAPABILITY_SHORT_SLOT_TIME |
+ WLAN_CAPABILITY_SHORT_PREAMBLE;
+ }
- return capab;
+ return 0;
}
static void ieee80211_tdls_add_link_ie(struct ieee80211_sub_if_data *sdata,
@@ -527,30 +520,19 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
/* if HT support is only added in TDLS, we need an HT-operation IE */
if (!ap_sta->sta.ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
- struct ieee80211_chanctx_conf *chanctx_conf =
- rcu_dereference(sdata->vif.chanctx_conf);
- if (!WARN_ON(!chanctx_conf)) {
- pos = skb_put(skb, 2 +
- sizeof(struct ieee80211_ht_operation));
- /* send an empty HT operation IE */
- ieee80211_ie_build_ht_oper(pos, &sta->sta.ht_cap,
- &chanctx_conf->def, 0);
- }
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
+ /* send an empty HT operation IE */
+ ieee80211_ie_build_ht_oper(pos, &sta->sta.ht_cap,
+ &sdata->vif.bss_conf.chandef, 0);
}
ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
/* only include VHT-operation if not on the 2.4GHz band */
- if (band != IEEE80211_BAND_2GHZ && !ap_sta->sta.vht_cap.vht_supported &&
- sta->sta.vht_cap.vht_supported) {
- struct ieee80211_chanctx_conf *chanctx_conf =
- rcu_dereference(sdata->vif.chanctx_conf);
- if (!WARN_ON(!chanctx_conf)) {
- pos = skb_put(skb, 2 +
- sizeof(struct ieee80211_vht_operation));
- ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
- &chanctx_conf->def);
- }
+ if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
+ ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
+ &sdata->vif.bss_conf.chandef);
}
rcu_read_unlock();
@@ -953,7 +935,7 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
* packet through the AP.
*/
if ((action_code == WLAN_TDLS_TEARDOWN) &&
- (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+ ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
bool try_resend; /* Should we keep skb for possible resend */
/* If not sending directly to peer - no point in keeping skb */
@@ -1194,6 +1176,12 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
switch (oper) {
case NL80211_TDLS_ENABLE_LINK:
+ if (sdata->vif.csa_active) {
+ tdls_dbg(sdata, "TDLS: disallow link during CSA\n");
+ ret = -EBUSY;
+ break;
+ }
+
rcu_read_lock();
sta = sta_info_get(sdata, peer);
if (!sta) {
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 4c2e7690226a..6f14591d8ca9 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -69,6 +69,17 @@
#define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG, \
__entry->rx_chains_static, __entry->rx_chains_dynamic
+#define KEY_ENTRY __field(u32, cipher) \
+ __field(u8, hw_key_idx) \
+ __field(u8, flags) \
+ __field(s8, keyidx)
+#define KEY_ASSIGN(k) __entry->cipher = (k)->cipher; \
+ __entry->flags = (k)->flags; \
+ __entry->keyidx = (k)->keyidx; \
+ __entry->hw_key_idx = (k)->hw_key_idx;
+#define KEY_PR_FMT " cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d"
+#define KEY_PR_ARG __entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx
+
/*
@@ -522,25 +533,19 @@ TRACE_EVENT(drv_set_key,
LOCAL_ENTRY
VIF_ENTRY
STA_ENTRY
- __field(u32, cipher)
- __field(u8, hw_key_idx)
- __field(u8, flags)
- __field(s8, keyidx)
+ KEY_ENTRY
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
STA_ASSIGN;
- __entry->cipher = key->cipher;
- __entry->flags = key->flags;
- __entry->keyidx = key->keyidx;
- __entry->hw_key_idx = key->hw_key_idx;
+ KEY_ASSIGN(key);
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT KEY_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, KEY_PR_ARG
)
);
@@ -656,28 +661,25 @@ TRACE_EVENT(drv_get_stats,
)
);
-TRACE_EVENT(drv_get_tkip_seq,
+TRACE_EVENT(drv_get_key_seq,
TP_PROTO(struct ieee80211_local *local,
- u8 hw_key_idx, u32 *iv32, u16 *iv16),
+ struct ieee80211_key_conf *key),
- TP_ARGS(local, hw_key_idx, iv32, iv16),
+ TP_ARGS(local, key),
TP_STRUCT__entry(
LOCAL_ENTRY
- __field(u8, hw_key_idx)
- __field(u32, iv32)
- __field(u16, iv16)
+ KEY_ENTRY
),
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->hw_key_idx = hw_key_idx;
- __entry->iv32 = *iv32;
- __entry->iv16 = *iv16;
+ KEY_ASSIGN(key);
),
TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
+ LOCAL_PR_FMT KEY_PR_FMT,
+ LOCAL_PR_ARG, KEY_PR_ARG
)
);
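The drv_get_tkip_seq -> drv_get_key_seq rename tracks a driver-ops change made outside this excerpt, generalizing TKIP-only sequence retrieval to any cipher; the old and (assumed) new callbacks:

	/* old */
	void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx,
			     u32 *iv32, u16 *iv16);
	/* new (assumed) */
	void (*get_key_seq)(struct ieee80211_hw *hw,
			    struct ieee80211_key_conf *key,
			    struct ieee80211_key_seq *seq);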
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 667111ee6a20..8410bb3bf5e8 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -37,6 +37,16 @@
/* misc utils */
+static inline void ieee80211_tx_stats(struct net_device *dev, u32 len)
+{
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_packets++;
+ tstats->tx_bytes += len;
+ u64_stats_update_end(&tstats->syncp);
+}
+
static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
struct sk_buff *skb, int group_addr,
int next_frag_len)
@@ -201,11 +211,11 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
struct ieee80211_if_managed *ifmgd;
/* driver doesn't support power save */
- if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
+ if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
return TX_CONTINUE;
/* hardware does dynamic power save */
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
+ if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
return TX_CONTINUE;
/* dynamic power save disabled */
@@ -421,7 +431,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (ieee80211_is_probe_req(hdr->frame_control))
return TX_CONTINUE;
- if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+ if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
info->hw_queue = tx->sdata->vif.cab_queue;
/* no stations in PS mode */
@@ -431,7 +441,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
/* device releases frame after DTIM beacon */
- if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
+ if (!ieee80211_hw_check(&tx->local->hw, HOST_BROADCAST_PS_BUFFERING))
return TX_CONTINUE;
/* buffered in mac80211 */
@@ -987,7 +997,6 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
skb_queue_walk(&tx->skbs, skb) {
ac = skb_get_queue_mapping(skb);
- tx->sta->tx_fragments++;
tx->sta->tx_bytes[ac] += skb->len;
}
if (ac >= 0)
@@ -1176,8 +1185,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
!ieee80211_is_qos_nullfunc(hdr->frame_control) &&
- (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) &&
- !(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) {
+ ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
+ !ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) {
struct tid_ampdu_tx *tid_tx;
qc = ieee80211_get_qos_ctl(hdr);
@@ -1420,7 +1429,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
vif = &sdata->vif;
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
- } else if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) {
+ } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
dev_kfree_skb(skb);
return true;
} else
@@ -1466,7 +1475,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
CALL_TXH(ieee80211_tx_h_ps_buf);
CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
CALL_TXH(ieee80211_tx_h_select_key);
- if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
+ if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
CALL_TXH(ieee80211_tx_h_rate_ctrl);
if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
@@ -1481,7 +1490,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
/* handlers after fragment must be aware of tx info fragmentation! */
CALL_TXH(ieee80211_tx_h_stats);
CALL_TXH(ieee80211_tx_h_encrypt);
- if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
+ if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
CALL_TXH(ieee80211_tx_h_calculate_duration);
#undef CALL_TXH
@@ -1571,7 +1580,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
/* set up hw_queue value early */
if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
- !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL))
+ !ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
info->hw_queue =
sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
@@ -1598,9 +1607,9 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
}
if (skb_cloned(skb) &&
- (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CLONED_SKBS) ||
+ (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
!skb_clone_writable(skb, ETH_HLEN) ||
- sdata->crypto_tx_tailroom_needed_cnt))
+ (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
else if (head_need || tail_need)
I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -2387,12 +2396,455 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
return ERR_PTR(ret);
}
+/*
+ * fast-xmit overview
+ *
+ * The core idea of this fast-xmit is to remove per-packet checks by checking
+ * them out of band. ieee80211_check_fast_xmit() implements the out-of-band
+ * checks that are needed to get the sta->fast_tx pointer assigned, after which
+ * much less work can be done per packet. For example, fragmentation must be
+ * disabled or the fast_tx pointer will not be set. All the conditions are seen
+ * in the code here.
+ *
+ * Once assigned, the fast_tx data structure also caches the per-packet 802.11
+ * header and other data to aid packet processing in ieee80211_xmit_fast().
+ *
+ * The most difficult part of this is that when any of these assumptions
+ * change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(),
+ * ieee80211_check_fast_xmit() or friends) is required to reset the data,
+ * since the per-packet code no longer checks the conditions. This is reflected
+ * by the calls to these functions throughout the rest of the code, and must be
+ * maintained if any of the TX path checks change.
+ */
+
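Pieced together from the sta_info.c and tx.c hunks in this patch, the cache lifecycle looks like this:

	/*
	 * sta authorized                 -> ieee80211_check_fast_xmit()  (build)
	 * sta enters PS / driver-blocked -> ieee80211_clear_fast_xmit()  (drop)
	 * sta wakes up                   -> ieee80211_check_fast_xmit()  (rebuild)
	 *
	 * per packet, in __ieee80211_subif_start_xmit():
	 *	fast_tx = rcu_dereference(sta->fast_tx);
	 *	if (fast_tx && ieee80211_xmit_fast(sdata, dev, sta, fast_tx, skb))
	 *		goto out;	(skip the slow path entirely)
	 */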
+void ieee80211_check_fast_xmit(struct sta_info *sta)
+{
+ struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old;
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_hdr *hdr = (void *)build.hdr;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ __le16 fc;
+
+ if (!ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT))
+ return;
+
+ /* Locking here protects both the pointer itself and against concurrent
+ * invocations racing on the data it caches, e.g. the key pointer that
+ * is used.
+ * Without it, invoking this function right after the key pointer
+ * changes wouldn't be sufficient, as another CPU could access the
+ * pointer, then stall, and then do the cache update after the CPU
+ * that invalidated the key.
+ * With the locking, such scenarios cannot happen: the key check and
+ * the fast-tx assignment are done atomically, so the CPU that modifies
+ * the key will either wait, or the other one will see the key
+ * cleared/changed already.
+ */
+ spin_lock_bh(&sta->lock);
+ if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
+ !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
+ sdata->vif.type == NL80211_IFTYPE_STATION)
+ goto out;
+
+ if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ goto out;
+
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+ test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
+ test_sta_flag(sta, WLAN_STA_PS_DELIVER))
+ goto out;
+
+ if (sdata->noack_map)
+ goto out;
+
+ /* fast-xmit doesn't handle fragmentation at all */
+ if (local->hw.wiphy->frag_threshold != (u32)-1 &&
+ !local->ops->set_frag_threshold)
+ goto out;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ goto out;
+ }
+ build.band = chanctx_conf->def.chan->band;
+ rcu_read_unlock();
+
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_ADHOC:
+ /* DA SA BSSID */
+ build.da_offs = offsetof(struct ieee80211_hdr, addr1);
+ build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+ memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN);
+ build.hdr_len = 24;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+ /* DA SA BSSID */
+ build.da_offs = offsetof(struct ieee80211_hdr, addr1);
+ build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+ memcpy(hdr->addr3, sdata->u.mgd.bssid, ETH_ALEN);
+ build.hdr_len = 24;
+ break;
+ }
+
+ if (sdata->u.mgd.use_4addr) {
+ /* non-regular ethertype cannot use the fastpath */
+ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
+ IEEE80211_FCTL_TODS);
+ /* RA TA DA SA */
+ memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+ build.da_offs = offsetof(struct ieee80211_hdr, addr3);
+ build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
+ build.hdr_len = 30;
+ break;
+ }
+ fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
+ /* BSSID SA DA */
+ memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
+ build.da_offs = offsetof(struct ieee80211_hdr, addr3);
+ build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+ build.hdr_len = 24;
+ break;
+ case NL80211_IFTYPE_AP_VLAN:
+ if (sdata->wdev.use_4addr) {
+ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
+ IEEE80211_FCTL_TODS);
+ /* RA TA DA SA */
+ memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
+ memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+ build.da_offs = offsetof(struct ieee80211_hdr, addr3);
+ build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
+ build.hdr_len = 30;
+ break;
+ }
+ /* fall through */
+ case NL80211_IFTYPE_AP:
+ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
+ /* DA BSSID SA */
+ build.da_offs = offsetof(struct ieee80211_hdr, addr1);
+ memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+ build.sa_offs = offsetof(struct ieee80211_hdr, addr3);
+ build.hdr_len = 24;
+ break;
+ default:
+ /* not handled on fast-xmit */
+ goto out;
+ }
+
+ if (sta->sta.wme) {
+ build.hdr_len += 2;
+ fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+ }
+
+ /* We store the key here so there's no point in using rcu_dereference()
+ * but that's fine because the code that changes the pointers will call
+ * this function after doing so. For a single CPU that would be enough,
+ * for multiple see the comment above.
+ */
+ build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
+ if (!build.key)
+ build.key = rcu_access_pointer(sdata->default_unicast_key);
+ if (build.key) {
+ bool gen_iv, iv_spc, mmic;
+
+ gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
+ iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ mmic = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+ /* don't handle software crypto */
+ if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
+ goto out;
+
+ switch (build.key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ /* add fixed key ID */
+ if (gen_iv) {
+ (build.hdr + build.hdr_len)[3] =
+ 0x20 | (build.key->conf.keyidx << 6);
+ build.pn_offs = build.hdr_len;
+ }
+ if (gen_iv || iv_spc)
+ build.hdr_len += IEEE80211_CCMP_HDR_LEN;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ /* add fixed key ID */
+ if (gen_iv) {
+ (build.hdr + build.hdr_len)[3] =
+ 0x20 | (build.key->conf.keyidx << 6);
+ build.pn_offs = build.hdr_len;
+ }
+ if (gen_iv || iv_spc)
+ build.hdr_len += IEEE80211_GCMP_HDR_LEN;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ /* cannot handle MMIC or IV generation in xmit-fast */
+ if (mmic || gen_iv)
+ goto out;
+ if (iv_spc)
+ build.hdr_len += IEEE80211_TKIP_IV_LEN;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* cannot handle IV generation in fast-xmit */
+ if (gen_iv)
+ goto out;
+ if (iv_spc)
+ build.hdr_len += IEEE80211_WEP_IV_LEN;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ WARN(1,
+ "management cipher suite 0x%x enabled for data\n",
+ build.key->conf.cipher);
+ goto out;
+ default:
+ /* we don't know how to generate IVs for this at all */
+ if (WARN_ON(gen_iv))
+ goto out;
+ /* pure hardware keys are OK, of course */
+ if (!(build.key->flags & KEY_FLAG_CIPHER_SCHEME))
+ break;
+ /* cipher scheme might require space allocation */
+ if (iv_spc &&
+ build.key->conf.iv_len > IEEE80211_FAST_XMIT_MAX_IV)
+ goto out;
+ if (iv_spc)
+ build.hdr_len += build.key->conf.iv_len;
+ }
+
+ fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+
+ hdr->frame_control = fc;
+
+ memcpy(build.hdr + build.hdr_len,
+ rfc1042_header, sizeof(rfc1042_header));
+ build.hdr_len += sizeof(rfc1042_header);
+
+ fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC);
+ /* if the kmemdup fails, continue w/o fast_tx */
+ if (!fast_tx)
+ goto out;
+
+ out:
+ /* we might have raced against another call to this function */
+ old = rcu_dereference_protected(sta->fast_tx,
+ lockdep_is_held(&sta->lock));
+ rcu_assign_pointer(sta->fast_tx, fast_tx);
+ if (old)
+ kfree_rcu(old, rcu_head);
+ spin_unlock_bh(&sta->lock);
+}
+
+void ieee80211_check_fast_xmit_all(struct ieee80211_local *local)
+{
+ struct sta_info *sta;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &local->sta_list, list)
+ ieee80211_check_fast_xmit(sta);
+ rcu_read_unlock();
+}
+
+void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (sdata != sta->sdata &&
+ (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
+ continue;
+ ieee80211_check_fast_xmit(sta);
+ }
+
+ rcu_read_unlock();
+}
+
+void ieee80211_clear_fast_xmit(struct sta_info *sta)
+{
+ struct ieee80211_fast_tx *fast_tx;
+
+ spin_lock_bh(&sta->lock);
+ fast_tx = rcu_dereference_protected(sta->fast_tx,
+ lockdep_is_held(&sta->lock));
+ RCU_INIT_POINTER(sta->fast_tx, NULL);
+ spin_unlock_bh(&sta->lock);
+
+ if (fast_tx)
+ kfree_rcu(fast_tx, rcu_head);
+}
+
+static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
+ struct net_device *dev, struct sta_info *sta,
+ struct ieee80211_fast_tx *fast_tx,
+ struct sk_buff *skb)
+{
+ struct ieee80211_local *local = sdata->local;
+ u16 ethertype = (skb->data[12] << 8) | skb->data[13];
+ int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
+ int hw_headroom = sdata->local->hw.extra_tx_headroom;
+ struct ethhdr eth;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
+ struct ieee80211_tx_data tx;
+ ieee80211_tx_result r;
+ struct tid_ampdu_tx *tid_tx = NULL;
+ u8 tid = IEEE80211_NUM_TIDS;
+
+ /* control port protocol needs a lot of special handling */
+ if (cpu_to_be16(ethertype) == sdata->control_port_protocol)
+ return false;
+
+ /* only RFC 1042 SNAP */
+ if (ethertype < ETH_P_802_3_MIN)
+ return false;
+
+ /* don't handle TX status request here either */
+ if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
+ return false;
+
+ if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+ if (tid_tx) {
+ if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
+ return false;
+ if (tid_tx->timeout)
+ tid_tx->last_tx = jiffies;
+ }
+ }
+
+ /* after this point (skb is modified) we cannot return false */
+
+ if (skb_shared(skb)) {
+ struct sk_buff *tmp_skb = skb;
+
+ skb = skb_clone(skb, GFP_ATOMIC);
+ kfree_skb(tmp_skb);
+
+ if (!skb)
+ return true;
+ }
+
+ ieee80211_tx_stats(dev, skb->len + extra_head);
+
+ /* will not be crypto-handled beyond what we do here, so use false
+ * as the may-encrypt argument for the resize to not account for
+ * more room than we already have in 'extra_head'
+ */
+ if (unlikely(ieee80211_skb_resize(sdata, skb,
+ max_t(int, extra_head + hw_headroom -
+ skb_headroom(skb), 0),
+ false))) {
+ kfree_skb(skb);
+ return true;
+ }
+
+ memcpy(&eth, skb->data, ETH_HLEN - 2);
+ hdr = (void *)skb_push(skb, extra_head);
+ memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len);
+ memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
+ memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
+
+ memset(info, 0, sizeof(*info));
+ info->band = fast_tx->band;
+ info->control.vif = &sdata->vif;
+ info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
+ IEEE80211_TX_CTL_DONTFRAG |
+ (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
+
+ if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+ *ieee80211_get_qos_ctl(hdr) = tid;
+ hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
+ } else {
+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
+ sdata->sequence_number += 0x10;
+ }
+
+ sta->tx_msdu[tid]++;
+
+ info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
+
+ __skb_queue_head_init(&tx.skbs);
+
+ tx.flags = IEEE80211_TX_UNICAST;
+ tx.local = local;
+ tx.sdata = sdata;
+ tx.sta = sta;
+ tx.key = fast_tx->key;
+
+ if (fast_tx->key)
+ info->control.hw_key = &fast_tx->key->conf;
+
+ if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
+ tx.skb = skb;
+ r = ieee80211_tx_h_rate_ctrl(&tx);
+ skb = tx.skb;
+ tx.skb = NULL;
+
+ if (r != TX_CONTINUE) {
+ if (r != TX_QUEUED)
+ kfree_skb(skb);
+ return true;
+ }
+ }
+
+ /* statistics normally done by ieee80211_tx_h_stats (but that
+ * has to consider fragmentation, so is more complex)
+ */
+ sta->tx_bytes[skb_get_queue_mapping(skb)] += skb->len;
+ sta->tx_packets[skb_get_queue_mapping(skb)]++;
+
+ if (fast_tx->pn_offs) {
+ u64 pn;
+ u8 *crypto_hdr = skb->data + fast_tx->pn_offs;
+
+ switch (fast_tx->key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ pn = atomic64_inc_return(&fast_tx->key->conf.tx_pn);
+ crypto_hdr[0] = pn;
+ crypto_hdr[1] = pn >> 8;
+ crypto_hdr[4] = pn >> 16;
+ crypto_hdr[5] = pn >> 24;
+ crypto_hdr[6] = pn >> 32;
+ crypto_hdr[7] = pn >> 40;
+ break;
+ }
+ }
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
+
+ __skb_queue_tail(&tx.skbs, skb);
+ ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
+ return true;
+}
+
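The pn_offs byte stores in ieee80211_xmit_fast(), combined with the 0x20 | (keyidx << 6) byte written once in ieee80211_check_fast_xmit(), reproduce the standard 8-byte CCMP/GCMP header:

	/*
	 * pn_offs + 0: PN0 (LSB)
	 * pn_offs + 1: PN1
	 * pn_offs + 2: reserved (0, left over from the zeroed build.hdr)
	 * pn_offs + 3: 0x20 | (keyidx << 6)  (ExtIV bit plus key ID)
	 * pn_offs + 4: PN2
	 * pn_offs + 5: PN3
	 * pn_offs + 6: PN4
	 * pn_offs + 7: PN5 (MSB)
	 */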
void __ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev,
u32 info_flags)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct sta_info *sta;
+ struct sk_buff *next;
if (unlikely(skb->len < ETH_HLEN)) {
kfree_skb(skb);
@@ -2401,20 +2853,67 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
rcu_read_lock();
- if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
- kfree_skb(skb);
- goto out;
+ if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
+ goto out_free;
+
+ if (!IS_ERR_OR_NULL(sta)) {
+ struct ieee80211_fast_tx *fast_tx;
+
+ fast_tx = rcu_dereference(sta->fast_tx);
+
+ if (fast_tx &&
+ ieee80211_xmit_fast(sdata, dev, sta, fast_tx, skb))
+ goto out;
}
- skb = ieee80211_build_hdr(sdata, skb, info_flags, sta);
- if (IS_ERR(skb))
- goto out;
+ if (skb_is_gso(skb)) {
+ struct sk_buff *segs;
+
+ segs = skb_gso_segment(skb, 0);
+ if (IS_ERR(segs)) {
+ goto out_free;
+ } else if (segs) {
+ consume_skb(skb);
+ skb = segs;
+ }
+ } else {
+ /* we cannot process non-linear frames on this path */
+ if (skb_linearize(skb)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ /* the frame could be fragmented or software-encrypted, among other
+ * things, so we cannot really handle checksum offload for it -
+ * fix it up in software before we handle anything else.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ skb_set_transport_header(skb,
+ skb_checksum_start_offset(skb));
+ if (skb_checksum_help(skb))
+ goto out_free;
+ }
+ }
+
+ next = skb;
+ while (next) {
+ skb = next;
+ next = skb->next;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
+ skb->prev = NULL;
+ skb->next = NULL;
+
+ skb = ieee80211_build_hdr(sdata, skb, info_flags, sta);
+ if (IS_ERR(skb))
+ goto out;
- ieee80211_xmit(sdata, sta, skb);
+ ieee80211_tx_stats(dev, skb->len);
+
+ ieee80211_xmit(sdata, sta, skb);
+ }
+ goto out;
+ out_free:
+ kfree_skb(skb);
out:
rcu_read_unlock();
}
@@ -3308,7 +3807,7 @@ int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid)
synchronize_net();
/* Tear down BA sessions so we stop aggregating on this TID */
- if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) {
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
__ieee80211_stop_tx_ba_session(sta, tid,
AGG_STOP_LOCAL_REQUEST);
@@ -3322,7 +3821,7 @@ int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid)
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_RESERVE_TID);
- if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
+ if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION))
clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
ret = 0;
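
For context on the fast-xmit PN hunk above: the CCMP/GCMP header is 8 bytes, of which byte 2 is reserved and byte 3 carries the ExtIV bit and key id; both are prebuilt in the cached fast-tx header, which is why only the six PN bytes are rewritten per frame. A sketch of the full layout, as a hypothetical standalone helper rather than mac80211 code:

#include <stdint.h>

/* Illustrative only: 48-bit PN into the 8-byte CCMP/GCMP IV area. */
static void ccmp_hdr_write_pn(uint8_t hdr[8], uint64_t pn, uint8_t keyid)
{
	hdr[0] = (uint8_t)pn;		/* PN0 */
	hdr[1] = (uint8_t)(pn >> 8);	/* PN1 */
	/* hdr[2] is reserved */
	hdr[3] = 0x20 | (keyid << 6);	/* ExtIV | key id, prebuilt in fast-tx */
	hdr[4] = (uint8_t)(pn >> 16);	/* PN2 */
	hdr[5] = (uint8_t)(pn >> 24);	/* PN3 */
	hdr[6] = (uint8_t)(pn >> 32);	/* PN4 */
	hdr[7] = (uint8_t)(pn >> 40);	/* PN5 */
}
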
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index b864ebc6ab8f..43e5aadd7a89 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -564,7 +564,7 @@ ieee80211_get_vif_queues(struct ieee80211_local *local,
{
unsigned int queues;
- if (sdata && local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) {
+ if (sdata && ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
int ac;
queues = 0;
@@ -592,7 +592,7 @@ void __ieee80211_flush_queues(struct ieee80211_local *local,
* If no queue was set, or if the HW doesn't support
* IEEE80211_HW_QUEUE_CONTROL - flush all queues
*/
- if (!queues || !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL))
+ if (!queues || !ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
queues = ieee80211_get_vif_queues(local, sdata);
ieee80211_stop_queues_by_reason(&local->hw, queues,
@@ -2046,7 +2046,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
* about the sessions, but we and the AP still think they
* are active. This is really a workaround though.
*/
- if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) {
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
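
The flag tests rewritten in this and the surrounding hunks all follow one shape: the boolean `hw.flags & IEEE80211_HW_FOO` expression becomes `ieee80211_hw_check(&hw, FOO)`, which lets the capability flags outgrow a single word. A toy model of the bitmap-based check, with names and widths illustrative rather than mac80211's definitions:

#include <stdbool.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

enum hw_flag { HW_AMPDU_AGGREGATION, HW_QUEUE_CONTROL, NUM_HW_FLAGS };

struct hw {
	unsigned long flags[(NUM_HW_FLAGS + BITS_PER_LONG - 1) / BITS_PER_LONG];
};

static bool hw_check(const struct hw *hw, enum hw_flag f)
{
	return hw->flags[f / BITS_PER_LONG] & (1UL << (f % BITS_PER_LONG));
}
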
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 9d63d93c836e..943f7606527e 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -444,7 +444,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
hdr = (struct ieee80211_hdr *) pos;
pos += hdrlen;
- pn64 = atomic64_inc_return(&key->u.ccmp.tx_pn);
+ pn64 = atomic64_inc_return(&key->conf.tx_pn);
pn[5] = pn64;
pn[4] = pn64 >> 8;
@@ -670,7 +670,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)pos;
pos += hdrlen;
- pn64 = atomic64_inc_return(&key->u.gcmp.tx_pn);
+ pn64 = atomic64_inc_return(&key->conf.tx_pn);
pn[5] = pn64;
pn[4] = pn64 >> 8;
@@ -940,7 +940,7 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
mmie->key_id = cpu_to_le16(key->conf.keyidx);
/* PN = PN + 1 */
- pn64 = atomic64_inc_return(&key->u.aes_cmac.tx_pn);
+ pn64 = atomic64_inc_return(&key->conf.tx_pn);
bip_ipn_set64(mmie->sequence_number, pn64);
@@ -984,7 +984,7 @@ ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx)
mmie->key_id = cpu_to_le16(key->conf.keyidx);
/* PN = PN + 1 */
- pn64 = atomic64_inc_return(&key->u.aes_cmac.tx_pn);
+ pn64 = atomic64_inc_return(&key->conf.tx_pn);
bip_ipn_set64(mmie->sequence_number, pn64);
@@ -1129,7 +1129,7 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
mmie->key_id = cpu_to_le16(key->conf.keyidx);
/* PN = PN + 1 */
- pn64 = atomic64_inc_return(&key->u.aes_gmac.tx_pn);
+ pn64 = atomic64_inc_return(&key->conf.tx_pn);
bip_ipn_set64(mmie->sequence_number, pn64);
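
The wpa.c hunks above all replace the per-cipher counters (key->u.ccmp.tx_pn, key->u.gcmp.tx_pn, key->u.aes_cmac.tx_pn, key->u.aes_gmac.tx_pn) with the single key->conf.tx_pn, so the fast-xmit path added earlier can increment the same counter without cipher-specific knowledge. The serialization after each increment stores the 48-bit value big-endian; as a standalone sketch:

#include <stdint.h>

/* pn[0] ends up as the most significant PN byte, matching the
 * pn[5] = pn64; pn[4] = pn64 >> 8; ... pattern in the hunks above. */
static void pn_to_be48(uint8_t pn[6], uint64_t pn64)
{
	pn[5] = (uint8_t)pn64;
	pn[4] = (uint8_t)(pn64 >> 8);
	pn[3] = (uint8_t)(pn64 >> 16);
	pn[2] = (uint8_t)(pn64 >> 24);
	pn[1] = (uint8_t)(pn64 >> 32);
	pn[0] = (uint8_t)(pn64 >> 40);
}
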
diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig
index aa462b480a39..fb45287ebac3 100644
--- a/net/mac802154/Kconfig
+++ b/net/mac802154/Kconfig
@@ -2,6 +2,7 @@ config MAC802154
tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
depends on IEEE802154
select CRC_CCITT
+ select CRYPTO
select CRYPTO_AUTHENC
select CRYPTO_CCM
select CRYPTO_CTR
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index 70be9c799f8a..317c4662e544 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -73,9 +73,9 @@ ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel)
ASSERT_RTNL();
- /* check if phy support this setting */
- if (!(wpan_phy->channels_supported[page] & BIT(channel)))
- return -EINVAL;
+ if (wpan_phy->current_page == page &&
+ wpan_phy->current_channel == channel)
+ return 0;
ret = drv_set_channel(local, page, channel);
if (!ret) {
@@ -95,9 +95,8 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy,
ASSERT_RTNL();
- /* check if phy support this setting */
- if (!(local->hw.flags & IEEE802154_HW_CCA_MODE))
- return -EOPNOTSUPP;
+ if (wpan_phy_cca_cmp(&wpan_phy->cca, cca))
+ return 0;
ret = drv_set_cca_mode(local, cca);
if (!ret)
@@ -107,20 +106,49 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy,
}
static int
+ieee802154_set_cca_ed_level(struct wpan_phy *wpan_phy, s32 ed_level)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (wpan_phy->cca_ed_level == ed_level)
+ return 0;
+
+ ret = drv_set_cca_ed_level(local, ed_level);
+ if (!ret)
+ wpan_phy->cca_ed_level = ed_level;
+
+ return ret;
+}
+
+static int
+ieee802154_set_tx_power(struct wpan_phy *wpan_phy, s32 power)
+{
+ struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (wpan_phy->transmit_power == power)
+ return 0;
+
+ ret = drv_set_tx_power(local, power);
+ if (!ret)
+ wpan_phy->transmit_power = power;
+
+ return ret;
+}
+
+static int
ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
__le16 pan_id)
{
ASSERT_RTNL();
- /* TODO
- * I am not sure about to check here on broadcast pan_id.
- * Broadcast is a valid setting, comment from 802.15.4:
- * If this value is 0xffff, the device is not associated.
- *
- * This could useful to simple deassociate an device.
- */
- if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
- return -EINVAL;
+ if (wpan_dev->pan_id == pan_id)
+ return 0;
wpan_dev->pan_id = pan_id;
return 0;
@@ -131,12 +159,11 @@ ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
u8 min_be, u8 max_be)
{
- struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
ASSERT_RTNL();
- if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS))
- return -EOPNOTSUPP;
+ if (wpan_dev->min_be == min_be &&
+ wpan_dev->max_be == max_be)
+ return 0;
wpan_dev->min_be = min_be;
wpan_dev->max_be = max_be;
@@ -149,20 +176,8 @@ ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
{
ASSERT_RTNL();
- /* TODO
- * I am not sure about to check here on broadcast short_addr.
- * Broadcast is a valid setting, comment from 802.15.4:
- * A value of 0xfffe indicates that the device has
- * associated but has not been allocated an address. A
- * value of 0xffff indicates that the device does not
- * have a short address.
- *
- * I think we should allow to set these settings but
- * don't allow to allow socket communication with it.
- */
- if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) ||
- short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST))
- return -EINVAL;
+ if (wpan_dev->short_addr == short_addr)
+ return 0;
wpan_dev->short_addr = short_addr;
return 0;
@@ -173,12 +188,10 @@ ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
u8 max_csma_backoffs)
{
- struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
ASSERT_RTNL();
- if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS))
- return -EOPNOTSUPP;
+ if (wpan_dev->csma_retries == max_csma_backoffs)
+ return 0;
wpan_dev->csma_retries = max_csma_backoffs;
return 0;
@@ -189,12 +202,10 @@ ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
s8 max_frame_retries)
{
- struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
ASSERT_RTNL();
- if (!(local->hw.flags & IEEE802154_HW_FRAME_RETRIES))
- return -EOPNOTSUPP;
+ if (wpan_dev->frame_retries == max_frame_retries)
+ return 0;
wpan_dev->frame_retries = max_frame_retries;
return 0;
@@ -204,12 +215,10 @@ static int
ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
bool mode)
{
- struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
ASSERT_RTNL();
- if (!(local->hw.flags & IEEE802154_HW_LBT))
- return -EOPNOTSUPP;
+ if (wpan_dev->lbt == mode)
+ return 0;
wpan_dev->lbt = mode;
return 0;
@@ -222,6 +231,8 @@ const struct cfg802154_ops mac802154_config_ops = {
.del_virtual_intf = ieee802154_del_iface,
.set_channel = ieee802154_set_channel,
.set_cca_mode = ieee802154_set_cca_mode,
+ .set_cca_ed_level = ieee802154_set_cca_ed_level,
+ .set_tx_power = ieee802154_set_tx_power,
.set_pan_id = ieee802154_set_pan_id,
.set_short_addr = ieee802154_set_short_addr,
.set_backoff_exponent = ieee802154_set_backoff_exponent,
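
The cfg.c rewrites above share one pattern: instead of rejecting requests based on hardware capability flags, each setter returns early when the requested value is already current, presumably leaving range validation to nl802154 against the phy->supported limits introduced later in this series. A toy standalone version, all names hypothetical:

#include <stdio.h>

struct phy { int channel; };

static int drv_set_channel(struct phy *p, int ch)
{
	p->channel = ch;	/* stands in for the real driver op */
	return 0;
}

static int set_channel(struct phy *p, int ch)
{
	if (p->channel == ch)	/* no-op request: succeed without driver call */
		return 0;
	return drv_set_channel(p, ch);
}

int main(void)
{
	struct phy p = { .channel = 11 };
	printf("%d\n", set_channel(&p, 11));	/* 0, driver not called */
	printf("%d\n", set_channel(&p, 15));	/* 0, channel updated */
	return 0;
}
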
diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h
index a0533357b9ea..caecd5f43aa7 100644
--- a/net/mac802154/driver-ops.h
+++ b/net/mac802154/driver-ops.h
@@ -58,7 +58,7 @@ drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel)
return local->ops->set_channel(&local->hw, page, channel);
}
-static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm)
+static inline int drv_set_tx_power(struct ieee802154_local *local, s32 mbm)
{
might_sleep();
@@ -67,7 +67,7 @@ static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm)
return -EOPNOTSUPP;
}
- return local->ops->set_txpower(&local->hw, dbm);
+ return local->ops->set_txpower(&local->hw, mbm);
}
static inline int drv_set_cca_mode(struct ieee802154_local *local,
@@ -96,7 +96,7 @@ static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode)
}
static inline int
-drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level)
+drv_set_cca_ed_level(struct ieee802154_local *local, s32 mbm)
{
might_sleep();
@@ -105,7 +105,7 @@ drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level)
return -EOPNOTSUPP;
}
- return local->ops->set_cca_ed_level(&local->hw, ed_level);
+ return local->ops->set_cca_ed_level(&local->hw, mbm);
}
static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id)
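
The s8 dbm → s32 mbm parameter change mirrors cfg80211's convention of expressing power in 1/100 dBm, so fractional values fit an integer type; cfg80211 provides DBM_TO_MBM()/MBM_TO_DBM() for this, restated here as an illustration rather than quoted:

#define DBM_TO_MBM(dbm)	((dbm) * 100)
#define MBM_TO_DBM(mbm)	((mbm) / 100)

/* -17.5 dBm has no whole-dBm representation, but as mbm: */
static const int tx_power_mbm = -1750;
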
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index 127ba18386fc..eec668f3637f 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -86,8 +86,6 @@ struct ieee802154_sub_if_data {
unsigned long state;
char name[IFNAMSIZ];
- spinlock_t mib_lock;
-
/* protects sec from concurrent access by netlink. access by
* encrypt/decrypt/header_create safe without additional protection.
*/
@@ -136,12 +134,7 @@ ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer);
/* MIB callbacks */
-void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val);
-__le16 mac802154_dev_get_short_addr(const struct net_device *dev);
-__le16 mac802154_dev_get_pan_id(const struct net_device *dev);
-void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val);
void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
-u8 mac802154_dev_get_dsn(const struct net_device *dev);
int mac802154_get_params(struct net_device *dev,
struct ieee802154_llsec_params *params);
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 91b75abbd1a1..b544b5dc4bfb 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -62,9 +62,10 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
(struct sockaddr_ieee802154 *)&ifr->ifr_addr;
int err = -ENOIOCTLCMD;
- ASSERT_RTNL();
+ if (cmd != SIOCGIFADDR && cmd != SIOCSIFADDR)
+ return err;
- spin_lock_bh(&sdata->mib_lock);
+ rtnl_lock();
switch (cmd) {
case SIOCGIFADDR:
@@ -89,7 +90,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
case SIOCSIFADDR:
if (netif_running(dev)) {
- spin_unlock_bh(&sdata->mib_lock);
+ rtnl_unlock();
return -EBUSY;
}
@@ -111,7 +112,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
}
- spin_unlock_bh(&sdata->mib_lock);
+ rtnl_unlock();
return err;
}
@@ -241,7 +242,6 @@ static int mac802154_wpan_open(struct net_device *dev)
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct ieee802154_local *local = sdata->local;
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
- struct wpan_phy *phy = sdata->local->phy;
rc = ieee802154_check_concurrent_iface(sdata, sdata->vif.type);
if (rc < 0)
@@ -251,8 +251,6 @@ static int mac802154_wpan_open(struct net_device *dev)
if (rc < 0)
return rc;
- mutex_lock(&phy->pib_lock);
-
if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
rc = drv_set_promiscuous_mode(local,
wpan_dev->promiscuous_mode);
@@ -294,11 +292,7 @@ static int mac802154_wpan_open(struct net_device *dev)
goto out;
}
- mutex_unlock(&phy->pib_lock);
- return 0;
-
out:
- mutex_unlock(&phy->pib_lock);
return rc;
}
@@ -374,14 +368,12 @@ static int mac802154_header_create(struct sk_buff *skb,
hdr.fc.type = cb->type;
hdr.fc.security_enabled = cb->secen;
hdr.fc.ack_request = cb->ackreq;
- hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+ hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
if (mac802154_set_header_security(sdata, &hdr, cb) < 0)
return -EINVAL;
if (!saddr) {
- spin_lock_bh(&sdata->mib_lock);
-
if (wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) ||
wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
@@ -393,8 +385,6 @@ static int mac802154_header_create(struct sk_buff *skb,
}
hdr.source.pan_id = wpan_dev->pan_id;
-
- spin_unlock_bh(&sdata->mib_lock);
} else {
hdr.source = *(const struct ieee802154_addr *)saddr;
}
@@ -474,13 +464,16 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
enum nl802154_iftype type)
{
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ u8 tmp;
/* set some type-dependent values */
sdata->vif.type = type;
sdata->wpan_dev.iftype = type;
- get_random_bytes(&wpan_dev->bsn, 1);
- get_random_bytes(&wpan_dev->dsn, 1);
+ get_random_bytes(&tmp, sizeof(tmp));
+ atomic_set(&wpan_dev->bsn, tmp);
+ get_random_bytes(&tmp, sizeof(tmp));
+ atomic_set(&wpan_dev->dsn, tmp);
/* defaults per 802.15.4-2011 */
wpan_dev->min_be = 3;
@@ -503,7 +496,6 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
sdata->dev->ml_priv = &mac802154_mlme_wpan;
wpan_dev->promiscuous_mode = false;
- spin_lock_init(&sdata->mib_lock);
mutex_init(&sdata->sec_mtx);
mac802154_llsec_init(&sdata->sec);
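
With mib_lock gone, the data sequence number lives in an atomic_t and is masked to 8 bits after each increment, as in the header_create hunk above. A toy C11 equivalent:

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint dsn;	/* stands in for wpan_dev->dsn */

static uint8_t next_dsn(void)
{
	/* atomic_inc_return() semantics: add one, use the new value;
	 * the cast wraps at 256 like the & 0xFF in the hunk above */
	return (uint8_t)(atomic_fetch_add(&dsn, 1) + 1);
}
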
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
index bdccb4ecd30f..8606da459ff3 100644
--- a/net/mac802154/mac_cmd.c
+++ b/net/mac802154/mac_cmd.c
@@ -36,37 +36,30 @@ static int mac802154_mlme_start_req(struct net_device *dev,
u8 pan_coord, u8 blx,
u8 coord_realign)
{
- struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
- int rc = 0;
+ struct ieee802154_llsec_params params;
+ int changed = 0;
ASSERT_RTNL();
BUG_ON(addr->mode != IEEE802154_ADDR_SHORT);
- mac802154_dev_set_pan_id(dev, addr->pan_id);
- mac802154_dev_set_short_addr(dev, addr->short_addr);
+ dev->ieee802154_ptr->pan_id = addr->pan_id;
+ dev->ieee802154_ptr->short_addr = addr->short_addr;
mac802154_dev_set_page_channel(dev, page, channel);
- if (ops->llsec) {
- struct ieee802154_llsec_params params;
- int changed = 0;
+ params.pan_id = addr->pan_id;
+ changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
- params.coord_shortaddr = addr->short_addr;
- changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
+ params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
+ changed |= IEEE802154_LLSEC_PARAM_HWADDR;
- params.pan_id = addr->pan_id;
- changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
+ params.coord_hwaddr = params.hwaddr;
+ changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
- params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
- changed |= IEEE802154_LLSEC_PARAM_HWADDR;
+ params.coord_shortaddr = addr->short_addr;
+ changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
- params.coord_hwaddr = params.hwaddr;
- changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
-
- rc = ops->llsec->set_params(dev, &params, changed);
- }
-
- return rc;
+ return mac802154_set_params(dev, &params, changed);
}
static int mac802154_set_mac_params(struct net_device *dev,
@@ -91,19 +84,19 @@ static int mac802154_set_mac_params(struct net_device *dev,
wpan_dev->frame_retries = params->frame_retries;
wpan_dev->lbt = params->lbt;
- if (local->hw.flags & IEEE802154_HW_TXPOWER) {
+ if (local->hw.phy->flags & WPAN_PHY_FLAG_TXPOWER) {
ret = drv_set_tx_power(local, params->transmit_power);
if (ret < 0)
return ret;
}
- if (local->hw.flags & IEEE802154_HW_CCA_MODE) {
+ if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_MODE) {
ret = drv_set_cca_mode(local, &params->cca);
if (ret < 0)
return ret;
}
- if (local->hw.flags & IEEE802154_HW_CCA_ED_LEVEL) {
+ if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
ret = drv_set_cca_ed_level(local, params->cca_ed_level);
if (ret < 0)
return ret;
@@ -151,9 +144,6 @@ static struct ieee802154_llsec_ops mac802154_llsec_ops = {
struct ieee802154_mlme_ops mac802154_mlme_wpan = {
.start_req = mac802154_mlme_start_req,
- .get_pan_id = mac802154_dev_get_pan_id,
- .get_short_addr = mac802154_dev_get_short_addr,
- .get_dsn = mac802154_dev_get_dsn,
.llsec = &mac802154_llsec_ops,
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 08cb32dc8fd3..356b346e1ee8 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -107,6 +107,18 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
skb_queue_head_init(&local->skb_queue);
+ /* init supported flags with 802.15.4 default ranges */
+ phy->supported.max_minbe = 8;
+ phy->supported.min_maxbe = 3;
+ phy->supported.max_maxbe = 8;
+ phy->supported.min_frame_retries = -1;
+ phy->supported.max_frame_retries = 7;
+ phy->supported.max_csma_backoffs = 5;
+ phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
+
+ /* always supported */
+ phy->supported.iftypes = BIT(NL802154_IFTYPE_NODE);
+
return &local->hw;
}
EXPORT_SYMBOL(ieee802154_alloc_hw);
@@ -155,6 +167,26 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
ieee802154_setup_wpan_phy_pib(local->phy);
+ if (!(hw->flags & IEEE802154_HW_CSMA_PARAMS)) {
+ local->phy->supported.min_csma_backoffs = 4;
+ local->phy->supported.max_csma_backoffs = 4;
+ local->phy->supported.min_maxbe = 5;
+ local->phy->supported.max_maxbe = 5;
+ local->phy->supported.min_minbe = 3;
+ local->phy->supported.max_minbe = 3;
+ }
+
+ if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
+ /* TODO should be 3, but our default value is -1 which means
+ * no ARET handling.
+ */
+ local->phy->supported.min_frame_retries = -1;
+ local->phy->supported.max_frame_retries = -1;
+ }
+
+ if (hw->flags & IEEE802154_HW_PROMISCUOUS)
+ local->phy->supported.iftypes |= BIT(NL802154_IFTYPE_MONITOR);
+
rc = wpan_phy_register(local->phy);
if (rc < 0)
goto out_wq;
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index 5cf019a57fd7..73f94fbf8785 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -26,81 +26,22 @@
#include "ieee802154_i.h"
#include "driver-ops.h"
-void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val)
-{
- struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- spin_lock_bh(&sdata->mib_lock);
- sdata->wpan_dev.short_addr = val;
- spin_unlock_bh(&sdata->mib_lock);
-}
-
-__le16 mac802154_dev_get_short_addr(const struct net_device *dev)
-{
- struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
- __le16 ret;
-
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- spin_lock_bh(&sdata->mib_lock);
- ret = sdata->wpan_dev.short_addr;
- spin_unlock_bh(&sdata->mib_lock);
-
- return ret;
-}
-
-__le16 mac802154_dev_get_pan_id(const struct net_device *dev)
-{
- struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
- __le16 ret;
-
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- spin_lock_bh(&sdata->mib_lock);
- ret = sdata->wpan_dev.pan_id;
- spin_unlock_bh(&sdata->mib_lock);
-
- return ret;
-}
-
-void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val)
-{
- struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- spin_lock_bh(&sdata->mib_lock);
- sdata->wpan_dev.pan_id = val;
- spin_unlock_bh(&sdata->mib_lock);
-}
-
-u8 mac802154_dev_get_dsn(const struct net_device *dev)
-{
- struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
- BUG_ON(dev->type != ARPHRD_IEEE802154);
-
- return sdata->wpan_dev.dsn++;
-}
-
void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
{
struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
struct ieee802154_local *local = sdata->local;
int res;
+ ASSERT_RTNL();
+
BUG_ON(dev->type != ARPHRD_IEEE802154);
res = drv_set_channel(local, page, chan);
if (res) {
pr_debug("set_channel failed\n");
} else {
- mutex_lock(&local->phy->pib_lock);
local->phy->current_channel = chan;
local->phy->current_page = page;
- mutex_unlock(&local->phy->pib_lock);
}
}
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index c0d67b2b4132..e0f10063cac3 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -47,8 +47,6 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
- spin_lock_bh(&sdata->mib_lock);
-
span = wpan_dev->pan_id;
sshort = wpan_dev->short_addr;
@@ -83,13 +81,10 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
skb->pkt_type = PACKET_OTHERHOST;
break;
default:
- spin_unlock_bh(&sdata->mib_lock);
pr_debug("invalid dest mode\n");
goto fail;
}
- spin_unlock_bh(&sdata->mib_lock);
-
skb->dev = sdata->dev;
rc = mac802154_llsec_decrypt(&sdata->sec, skb);
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
index 150bf807e572..583435f38930 100644
--- a/net/mac802154/util.c
+++ b/net/mac802154/util.c
@@ -85,11 +85,10 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
hrtimer_start(&local->ifs_timer,
ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC),
HRTIMER_MODE_REL);
-
- consume_skb(skb);
} else {
ieee802154_wake_queue(hw);
- consume_skb(skb);
}
+
+ dev_consume_skb_any(skb);
}
EXPORT_SYMBOL(ieee802154_xmit_complete);
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 809df534a720..0183b32da942 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -62,6 +62,7 @@ out:
static struct packet_offload mpls_mc_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_MPLS_MC),
+ .priority = 15,
.callbacks = {
.gso_segment = mpls_gso_segment,
},
@@ -69,6 +70,7 @@ static struct packet_offload mpls_mc_offload __read_mostly = {
static struct packet_offload mpls_uc_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_MPLS_UC),
+ .priority = 15,
.callbacks = {
.gso_segment = mpls_gso_segment,
},
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index a0f3e6a3c7d1..fbc8d15c7fda 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1,6 +1,14 @@
menu "Core Netfilter Configuration"
depends on NET && INET && NETFILTER
+config NETFILTER_INGRESS
+ bool "Netfilter ingress support"
+ default y
+ select NET_INGRESS
+ help
+ This allows you to classify packets from ingress using the Netfilter
+ infrastructure.
+
config NETFILTER_NETLINK
tristate
@@ -448,6 +456,11 @@ config NF_TABLES_INET
help
This option enables support for a mixed IPv4/IPv6 "inet" table.
+config NF_TABLES_NETDEV
+ tristate "Netfilter nf_tables netdev tables support"
+ help
+ This option enables support for the "netdev" table.
+
config NFT_EXTHDR
tristate "Netfilter nf_tables IPv6 exthdr module"
help
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index a87d8b8ec730..70d026d46fe7 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -75,6 +75,7 @@ nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
obj-$(CONFIG_NF_TABLES) += nf_tables.o
obj-$(CONFIG_NF_TABLES_INET) += nf_tables_inet.o
+obj-$(CONFIG_NF_TABLES_NETDEV) += nf_tables_netdev.o
obj-$(CONFIG_NFT_COMPAT) += nft_compat.o
obj-$(CONFIG_NFT_EXTHDR) += nft_exthdr.o
obj-$(CONFIG_NFT_META) += nft_meta.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e6163017c42d..653e32eac08c 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -64,10 +64,27 @@ static DEFINE_MUTEX(nf_hook_mutex);
int nf_register_hook(struct nf_hook_ops *reg)
{
+ struct list_head *nf_hook_list;
struct nf_hook_ops *elem;
mutex_lock(&nf_hook_mutex);
- list_for_each_entry(elem, &nf_hooks[reg->pf][reg->hooknum], list) {
+ switch (reg->pf) {
+ case NFPROTO_NETDEV:
+#ifdef CONFIG_NETFILTER_INGRESS
+ if (reg->hooknum == NF_NETDEV_INGRESS) {
+ BUG_ON(reg->dev == NULL);
+ nf_hook_list = &reg->dev->nf_hooks_ingress;
+ net_inc_ingress_queue();
+ break;
+ }
+#endif
+ /* Fall through. */
+ default:
+ nf_hook_list = &nf_hooks[reg->pf][reg->hooknum];
+ break;
+ }
+
+ list_for_each_entry(elem, nf_hook_list, list) {
if (reg->priority < elem->priority)
break;
}
@@ -85,6 +102,18 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
mutex_lock(&nf_hook_mutex);
list_del_rcu(&reg->list);
mutex_unlock(&nf_hook_mutex);
+ switch (reg->pf) {
+ case NFPROTO_NETDEV:
+#ifdef CONFIG_NETFILTER_INGRESS
+ if (reg->hooknum == NF_NETDEV_INGRESS) {
+ net_dec_ingress_queue();
+ break;
+ }
+ break;
+#endif
+ default:
+ break;
+ }
#ifdef HAVE_JUMP_LABEL
static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
@@ -166,11 +195,9 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
/* We may already have this, but read-locks nest anyway */
rcu_read_lock();
- elem = list_entry_rcu(&nf_hooks[state->pf][state->hook],
- struct nf_hook_ops, list);
+ elem = list_entry_rcu(state->hook_list, struct nf_hook_ops, list);
next_hook:
- verdict = nf_iterate(&nf_hooks[state->pf][state->hook], skb, state,
- &elem);
+ verdict = nf_iterate(state->hook_list, skb, state, &elem);
if (verdict == NF_ACCEPT || verdict == NF_STOP) {
ret = 1;
} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
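
The core.c change is about list selection rather than ordering: NFPROTO_NETDEV/NF_NETDEV_INGRESS hooks go onto the per-device nf_hooks_ingress list, everything else onto the global nf_hooks table, and nf_hook_slow() then walks whichever list the hook state carries. The priority-ordered insert itself is unchanged; a toy singly-linked version of it:

#include <stddef.h>

struct hook { int priority; struct hook *next; };

/* Walk to the first element with a strictly higher priority and
 * link the new hook in front of it, as nf_register_hook() does. */
static void hook_insert(struct hook **head, struct hook *reg)
{
	while (*head && (*head)->priority <= reg->priority)
		head = &(*head)->next;
	reg->next = *head;
	*head = reg;
}
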
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 55b083ec587a..2fe6de46f6d0 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -36,6 +36,7 @@ IP_SET_MODULE_DESC("bitmap:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_bitmap:ip");
#define MTYPE bitmap_ip
+#define HOST_MASK 32
/* Type structure */
struct bitmap_ip {
@@ -149,8 +150,11 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -174,7 +178,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!cidr || cidr > 32)
+ if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
} else
@@ -277,7 +281,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (cidr >= 32)
+ if (cidr >= HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(first_ip, last_ip, cidr);
} else
@@ -286,7 +290,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_NETMASK]) {
netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
- if (netmask > 32)
+ if (netmask > HOST_MASK)
return -IPSET_ERR_INVALID_NETMASK;
first_ip &= ip_set_hostmask(netmask);
@@ -360,7 +364,8 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 86104744b00f..eb188561d65f 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -36,6 +36,7 @@ IP_SET_MODULE_DESC("bitmap:ip,mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_bitmap:ip,mac");
#define MTYPE bitmap_ipmac
+#define HOST_MASK 32
#define IP_SET_BITMAP_STORED_TIMEOUT
enum {
@@ -250,8 +251,11 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -343,7 +347,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (cidr >= 32)
+ if (cidr >= HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(first_ip, last_ip, cidr);
} else
@@ -397,7 +401,8 @@ static struct ip_set_type bitmap_ipmac_type = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 005dd36444c3..898edb693b3f 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -294,7 +294,8 @@ static struct ip_set_type bitmap_port_type = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index d259da3ce67a..475e4960a164 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -42,7 +42,7 @@ static inline struct ip_set_net *ip_set_pernet(struct net *net)
}
#define IP_SET_INC 64
-#define STREQ(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
+#define STRNCMP(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
static unsigned int max_sets;
@@ -85,7 +85,7 @@ find_set_type(const char *name, u8 family, u8 revision)
struct ip_set_type *type;
list_for_each_entry_rcu(type, &ip_set_type_list, list)
- if (STREQ(type->name, name) &&
+ if (STRNCMP(type->name, name) &&
(type->family == family ||
type->family == NFPROTO_UNSPEC) &&
revision >= type->revision_min &&
@@ -132,7 +132,7 @@ __find_set_type_get(const char *name, u8 family, u8 revision,
/* Make sure the type is already loaded
* but we don't support the revision */
list_for_each_entry_rcu(type, &ip_set_type_list, list)
- if (STREQ(type->name, name)) {
+ if (STRNCMP(type->name, name)) {
err = -IPSET_ERR_FIND_TYPE;
goto unlock;
}
@@ -166,7 +166,7 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
*min = 255; *max = 0;
rcu_read_lock();
list_for_each_entry_rcu(type, &ip_set_type_list, list)
- if (STREQ(type->name, name) &&
+ if (STRNCMP(type->name, name) &&
(type->family == family ||
type->family == NFPROTO_UNSPEC)) {
found = true;
@@ -365,7 +365,7 @@ size_t
ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
{
enum ip_set_ext_id id;
- size_t offset = 0;
+ size_t offset = len;
u32 cadt_flags = 0;
if (tb[IPSET_ATTR_CADT_FLAGS])
@@ -375,12 +375,12 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
if (!add_extension(id, cadt_flags, tb))
continue;
- offset += ALIGN(len + offset, ip_set_extensions[id].align);
+ offset = ALIGN(offset, ip_set_extensions[id].align);
set->offset[id] = offset;
set->extensions |= ip_set_extensions[id].type;
offset += ip_set_extensions[id].len;
}
- return len + offset;
+ return offset;
}
EXPORT_SYMBOL_GPL(ip_set_elem_len);
@@ -432,6 +432,31 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
}
EXPORT_SYMBOL_GPL(ip_set_get_extensions);
+int
+ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
+ const void *e, bool active)
+{
+ if (SET_WITH_TIMEOUT(set)) {
+ unsigned long *timeout = ext_timeout(e, set);
+
+ if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(active ? ip_set_timeout_get(timeout)
+ : *timeout)))
+ return -EMSGSIZE;
+ }
+ if (SET_WITH_COUNTER(set) &&
+ ip_set_put_counter(skb, ext_counter(e, set)))
+ return -EMSGSIZE;
+ if (SET_WITH_COMMENT(set) &&
+ ip_set_put_comment(skb, ext_comment(e, set)))
+ return -EMSGSIZE;
+ if (SET_WITH_SKBINFO(set) &&
+ ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
+ return -EMSGSIZE;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_put_extensions);
+
/*
* Creating/destroying/renaming/swapping affect the existence and
* the properties of a set. All of these can be executed from userspace
@@ -581,7 +606,7 @@ ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
rcu_read_lock();
for (i = 0; i < inst->ip_set_max; i++) {
s = rcu_dereference(inst->ip_set_list)[i];
- if (s != NULL && STREQ(s->name, name)) {
+ if (s != NULL && STRNCMP(s->name, name)) {
__ip_set_get(s);
index = i;
*set = s;
@@ -758,7 +783,7 @@ find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
*id = IPSET_INVALID_ID;
for (i = 0; i < inst->ip_set_max; i++) {
set = ip_set(inst, i);
- if (set != NULL && STREQ(set->name, name)) {
+ if (set != NULL && STRNCMP(set->name, name)) {
*id = i;
break;
}
@@ -787,7 +812,7 @@ find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
if (s == NULL) {
if (*index == IPSET_INVALID_ID)
*index = i;
- } else if (STREQ(name, s->name)) {
+ } else if (STRNCMP(name, s->name)) {
/* Name clash */
*set = s;
return -EEXIST;
@@ -887,7 +912,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
if (ret == -EEXIST) {
/* If this is the same set and requested, ignore error */
if ((flags & IPSET_FLAG_EXIST) &&
- STREQ(set->type->name, clash->type->name) &&
+ STRNCMP(set->type->name, clash->type->name) &&
set->type->family == clash->type->family &&
set->type->revision_min == clash->type->revision_min &&
set->type->revision_max == clash->type->revision_max &&
@@ -1098,7 +1123,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
- if (s != NULL && STREQ(s->name, name2)) {
+ if (s != NULL && STRNCMP(s->name, name2)) {
ret = -IPSET_ERR_EXIST_SETNAME2;
goto out;
}
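
The ip_set_elem_len() fix above is easiest to see with numbers: the old code aligned `len + offset` and then added `len` again in the return value, so every element over-reserved by roughly `len` bytes. A standalone check with a 20-byte base element and one 16-byte extension at 8-byte alignment:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1UL) & ~((a) - 1UL))

int main(void)
{
	unsigned long len = 20, ext_len = 16, align = 8;
	unsigned long off;

	/* old: offset += ALIGN(len + offset, align); return len + offset; */
	off = 0;
	off += ALIGN(len + off, align);		/* 24 */
	off += ext_len;				/* 40 */
	printf("old: %lu\n", len + off);	/* 60 - len counted twice */

	/* new: offset = len; offset = ALIGN(offset, align); return offset; */
	off = ALIGN(len, align);		/* 24 */
	off += ext_len;				/* 40 */
	printf("new: %lu\n", off);		/* 40 */
	return 0;
}
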
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 29fb01ddff93..1981f021cc60 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -98,7 +98,7 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
__be16 *port, u8 *proto)
{
const struct iphdr *iph = ip_hdr(skb);
- unsigned int protooff = ip_hdrlen(skb);
+ unsigned int protooff = skb_network_offset(skb) + ip_hdrlen(skb);
int protocol = iph->protocol;
/* See comments at tcp_match in ip_tables.c */
@@ -135,7 +135,9 @@ ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
__be16 frag_off = 0;
nexthdr = ipv6_hdr(skb)->nexthdr;
- protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+ protoff = ipv6_skip_exthdr(skb,
+ skb_network_offset(skb) +
+ sizeof(struct ipv6hdr), &nexthdr,
&frag_off);
if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
return false;
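
Both getport fixes add skb_network_offset() because the offsets handed to the parser are relative to skb->data, which does not always point at the network header (e.g., plausibly, when these helpers run from the bridge family). In diagram form:

/*
 *   skb->data          network header        transport header
 *      |<- skb_network_offset ->|<-- ip_hdrlen -->|
 *
 *   protooff = skb_network_offset(skb) + ip_hdrlen(skb);
 */
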
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 974ff386db0f..7952869c8023 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -180,6 +180,7 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
#undef mtype_data_equal
#undef mtype_do_data_match
#undef mtype_data_set_flags
+#undef mtype_data_reset_elem
#undef mtype_data_reset_flags
#undef mtype_data_netmask
#undef mtype_data_list
@@ -193,7 +194,6 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
#undef mtype_ahash_memsize
#undef mtype_flush
#undef mtype_destroy
-#undef mtype_gc_init
#undef mtype_same_set
#undef mtype_kadt
#undef mtype_uadt
@@ -227,6 +227,7 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
#define mtype_data_list IPSET_TOKEN(MTYPE, _data_list)
#define mtype_data_next IPSET_TOKEN(MTYPE, _data_next)
#define mtype_elem IPSET_TOKEN(MTYPE, _elem)
+
#define mtype_ahash_destroy IPSET_TOKEN(MTYPE, _ahash_destroy)
#define mtype_ext_cleanup IPSET_TOKEN(MTYPE, _ext_cleanup)
#define mtype_add_cidr IPSET_TOKEN(MTYPE, _add_cidr)
@@ -234,7 +235,6 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
#define mtype_ahash_memsize IPSET_TOKEN(MTYPE, _ahash_memsize)
#define mtype_flush IPSET_TOKEN(MTYPE, _flush)
#define mtype_destroy IPSET_TOKEN(MTYPE, _destroy)
-#define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
#define mtype_same_set IPSET_TOKEN(MTYPE, _same_set)
#define mtype_kadt IPSET_TOKEN(MTYPE, _kadt)
#define mtype_uadt IPSET_TOKEN(MTYPE, _uadt)
@@ -249,9 +249,18 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
#define mtype_head IPSET_TOKEN(MTYPE, _head)
#define mtype_list IPSET_TOKEN(MTYPE, _list)
#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
+#define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
#define mtype_variant IPSET_TOKEN(MTYPE, _variant)
#define mtype_data_match IPSET_TOKEN(MTYPE, _data_match)
+#ifndef MTYPE
+#error "MTYPE is not defined!"
+#endif
+
+#ifndef HOST_MASK
+#error "HOST_MASK is not defined!"
+#endif
+
#ifndef HKEY_DATALEN
#define HKEY_DATALEN sizeof(struct mtype_elem)
#endif
@@ -261,6 +270,9 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
& jhash_mask(htable_bits))
#ifndef htype
+#ifndef HTYPE
+#error "HTYPE is not defined!"
+#endif /* HTYPE */
#define htype HTYPE
/* The generic hash structure */
@@ -287,7 +299,7 @@ struct htype {
struct net_prefixes nets[0]; /* book-keeping of prefixes */
#endif
};
-#endif
+#endif /* htype */
#ifdef IP_SET_HASH_WITH_NETS
/* Network cidr size book keeping when the hash stores different
@@ -1045,7 +1057,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
u8 netmask;
#endif
size_t hsize;
- struct HTYPE *h;
+ struct htype *h;
struct htable *t;
#ifndef IP_SET_PROTO_UNDEF
@@ -1165,3 +1177,5 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
return 0;
}
#endif /* IP_SET_EMIT_CREATE */
+
+#undef HKEY_DATALEN
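
ip_set_hash_gen.h is a template header, included once per set variant with MTYPE and HOST_MASK (and optionally HTYPE, HKEY_DATALEN, IP_SET_EMIT_CREATE) defined by the including file; the new #error guards turn a missing parameter into an immediate preprocessor failure instead of obscure expansion errors, and the trailing #undef HKEY_DATALEN lets each inclusion start clean. The pattern, abridged from the hash_ip.c hunks below:

#define MTYPE     hash_ip4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"	/* emits the hash_ip4_* variant */

#undef MTYPE
#undef HOST_MASK

#define MTYPE     hash_ip6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE	/* also emit the shared _create() */
#include "ip_set_hash_gen.h"
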
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 76959d79e9d1..54df48b5c455 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -56,15 +56,15 @@ hash_ip4_data_equal(const struct hash_ip4_elem *e1,
return e1->ip == e2->ip;
}
-static inline bool
+static bool
hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *e)
{
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -74,7 +74,6 @@ hash_ip4_data_next(struct hash_ip4_elem *next, const struct hash_ip4_elem *e)
}
#define MTYPE hash_ip4
-#define PF 4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
@@ -121,8 +120,11 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -145,7 +147,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!cidr || cidr > 32)
+ if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
}
@@ -196,10 +198,10 @@ hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *e)
{
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -208,12 +210,9 @@ hash_ip6_data_next(struct hash_ip4_elem *next, const struct hash_ip6_elem *e)
}
#undef MTYPE
-#undef PF
#undef HOST_MASK
-#undef HKEY_DATALEN
#define MTYPE hash_ip6
-#define PF 6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
@@ -261,8 +260,11 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -301,7 +303,8 @@ static struct ip_set_type hash_ip_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index 7abf9788cfa8..d231248eb3e2 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -63,10 +63,10 @@ hash_ipmark4_data_list(struct sk_buff *skb,
if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -76,10 +76,8 @@ hash_ipmark4_data_next(struct hash_ipmark4_elem *next,
next->ip = d->ip;
}
-#define MTYPE hash_ipmark4
-#define PF 4
-#define HOST_MASK 32
-#define HKEY_DATALEN sizeof(struct hash_ipmark4_elem)
+#define MTYPE hash_ipmark4
+#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static int
@@ -123,12 +121,15 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
- e.mark = ntohl(nla_get_u32(tb[IPSET_ATTR_MARK]));
+ ret = ip_set_get_extensions(set, tb, &ext);
+ if (ret)
+ return ret;
+
+ e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
e.mark &= h->markmask;
if (adt == IPSET_TEST ||
@@ -147,7 +148,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!cidr || cidr > 32)
+ if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
}
@@ -191,10 +192,10 @@ hash_ipmark6_data_list(struct sk_buff *skb,
if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -204,15 +205,11 @@ hash_ipmark6_data_next(struct hash_ipmark4_elem *next,
}
#undef MTYPE
-#undef PF
#undef HOST_MASK
-#undef HKEY_DATALEN
#define MTYPE hash_ipmark6
-#define PF 6
#define HOST_MASK 128
-#define HKEY_DATALEN sizeof(struct hash_ipmark6_elem)
-#define IP_SET_EMIT_CREATE
+#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
@@ -258,12 +255,15 @@ hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
- e.mark = ntohl(nla_get_u32(tb[IPSET_ATTR_MARK]));
+ e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
e.mark &= h->markmask;
if (adt == IPSET_TEST) {
@@ -307,7 +307,8 @@ static struct ip_set_type hash_ipmark_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index dcbcceb9a52f..a47c29f12090 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -69,10 +69,10 @@ hash_ipport4_data_list(struct sk_buff *skb,
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -83,10 +83,8 @@ hash_ipport4_data_next(struct hash_ipport4_elem *next,
next->port = d->port;
}
-#define MTYPE hash_ipport4
-#define PF 4
-#define HOST_MASK 32
-#define HKEY_DATALEN sizeof(struct hash_ipport4_elem)
+#define MTYPE hash_ipport4
+#define HOST_MASK 32
#include "ip_set_hash_gen.h"
static int
@@ -132,15 +130,15 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
- if (tb[IPSET_ATTR_PORT])
- e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
- else
- return -IPSET_ERR_PROTOCOL;
+ ret = ip_set_get_extensions(set, tb, &ext);
+ if (ret)
+ return ret;
+
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -171,7 +169,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!cidr || cidr > 32)
+ if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
}
@@ -231,10 +229,10 @@ hash_ipport6_data_list(struct sk_buff *skb,
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -245,15 +243,11 @@ hash_ipport6_data_next(struct hash_ipport4_elem *next,
}
#undef MTYPE
-#undef PF
#undef HOST_MASK
-#undef HKEY_DATALEN
#define MTYPE hash_ipport6
-#define PF 6
#define HOST_MASK 128
-#define HKEY_DATALEN sizeof(struct hash_ipport6_elem)
-#define IP_SET_EMIT_CREATE
+#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
static int
@@ -301,15 +295,15 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
if (ret)
return ret;
- if (tb[IPSET_ATTR_PORT])
- e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
- else
- return -IPSET_ERR_PROTOCOL;
+ ret = ip_set_get_extensions(set, tb, &ext);
+ if (ret)
+ return ret;
+
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -376,7 +370,8 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 7ef93fc887a1..89615f134845 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -70,10 +70,10 @@ hash_ipportip4_data_list(struct sk_buff *skb,
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -86,7 +86,6 @@ hash_ipportip4_data_next(struct hash_ipportip4_elem *next,
/* Common functions */
#define MTYPE hash_ipportip4
-#define PF 4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
@@ -134,8 +133,11 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -143,10 +145,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret)
return ret;
- if (tb[IPSET_ATTR_PORT])
- e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
- else
- return -IPSET_ERR_PROTOCOL;
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -177,7 +176,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
} else if (tb[IPSET_ATTR_CIDR]) {
u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!cidr || cidr > 32)
+ if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
}
@@ -240,10 +239,10 @@ hash_ipportip6_data_list(struct sk_buff *skb,
nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -254,11 +253,9 @@ hash_ipportip6_data_next(struct hash_ipportip4_elem *next,
}
#undef MTYPE
-#undef PF
#undef HOST_MASK
#define MTYPE hash_ipportip6
-#define PF 6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
@@ -309,8 +306,11 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -318,10 +318,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret)
return ret;
- if (tb[IPSET_ATTR_PORT])
- e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
- else
- return -IPSET_ERR_PROTOCOL;
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -388,7 +385,8 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index b6012ad92781..6ba7a7e083f9 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -114,10 +114,10 @@ hash_ipportnet4_data_list(struct sk_buff *skb,
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -130,7 +130,6 @@ hash_ipportnet4_data_next(struct hash_ipportnet4_elem *next,
}
#define MTYPE hash_ipportnet4
-#define PF 4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
@@ -189,8 +188,11 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -205,10 +207,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
e.cidr = cidr - 1;
}
- if (tb[IPSET_ATTR_PORT])
- e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
- else
- return -IPSET_ERR_PROTOCOL;
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -249,7 +248,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
} else if (tb[IPSET_ATTR_CIDR]) {
cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!cidr || cidr > 32)
+ if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
}
@@ -367,10 +366,10 @@ hash_ipportnet6_data_list(struct sk_buff *skb,
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -381,11 +380,9 @@ hash_ipportnet6_data_next(struct hash_ipportnet4_elem *next,
}
#undef MTYPE
-#undef PF
#undef HOST_MASK
#define MTYPE hash_ipportnet6
-#define PF 6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
@@ -448,8 +445,11 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -466,10 +466,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
ip6_netmask(&e.ip2, e.cidr + 1);
- if (tb[IPSET_ATTR_PORT])
- e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
- else
- return -IPSET_ERR_PROTOCOL;
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -547,7 +544,8 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
index 65690b52a4d5..1f8668d7a538 100644
--- a/net/netfilter/ipset/ip_set_hash_mac.c
+++ b/net/netfilter/ipset/ip_set_hash_mac.c
@@ -52,7 +52,12 @@ hash_mac4_data_equal(const struct hash_mac4_elem *e1,
static inline bool
hash_mac4_data_list(struct sk_buff *skb, const struct hash_mac4_elem *e)
{
- return nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether);
+ if (nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
+ goto nla_put_failure;
+ return false;
+
+nla_put_failure:
+ return true;
}
static inline void
@@ -62,7 +67,6 @@ hash_mac4_data_next(struct hash_mac4_elem *next,
}
#define MTYPE hash_mac4
-#define PF 4
#define HOST_MASK 32
#define IP_SET_EMIT_CREATE
#define IP_SET_PROTO_UNDEF
@@ -149,7 +153,8 @@ static struct ip_set_type hash_mac_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 6b3ac10ac2f1..2e63dad8644d 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -95,10 +95,10 @@ hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -109,7 +109,6 @@ hash_net4_data_next(struct hash_net4_elem *next,
}
#define MTYPE hash_net4
-#define PF 4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
@@ -160,8 +159,11 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -264,10 +266,10 @@ hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -277,11 +279,9 @@ hash_net6_data_next(struct hash_net4_elem *next,
}
#undef MTYPE
-#undef PF
#undef HOST_MASK
#define MTYPE hash_net6
-#define PF 6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
@@ -333,8 +333,11 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -383,7 +386,8 @@ static struct ip_set_type hash_net_type __read_mostly = {
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 380ef5148ea1..fe481f677f56 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -193,10 +193,10 @@ hash_netiface4_data_list(struct sk_buff *skb,
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -207,7 +207,6 @@ hash_netiface4_data_next(struct hash_netiface4_elem *next,
}
#define MTYPE hash_netiface4
-#define PF 4
#define HOST_MASK 32
#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed)
#include "ip_set_hash_gen.h"
@@ -308,8 +307,11 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -444,10 +446,10 @@ hash_netiface6_data_list(struct sk_buff *skb,
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -457,12 +459,9 @@ hash_netiface6_data_next(struct hash_netiface4_elem *next,
}
#undef MTYPE
-#undef PF
#undef HOST_MASK
-#undef HKEY_DATALEN
#define MTYPE hash_netiface6
-#define PF 6
#define HOST_MASK 128
#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed)
#define IP_SET_EMIT_CREATE
@@ -546,8 +545,11 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -613,7 +615,8 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index ea8772afb6e7..847047483560 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -128,7 +128,6 @@ hash_netnet4_data_next(struct hash_netnet4_elem *next,
}
#define MTYPE hash_netnet4
-#define PF 4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
@@ -182,9 +181,15 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
- ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -354,11 +359,9 @@ hash_netnet6_data_next(struct hash_netnet4_elem *next,
}
#undef MTYPE
-#undef PF
#undef HOST_MASK
#define MTYPE hash_netnet6
-#define PF 6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
@@ -411,9 +414,15 @@ hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
- ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -470,7 +479,8 @@ static struct ip_set_type hash_netnet_type __read_mostly = {
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index c0ddb58d19dc..8273819c1a2f 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -110,10 +110,10 @@ hash_netport4_data_list(struct sk_buff *skb,
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -125,7 +125,6 @@ hash_netport4_data_next(struct hash_netport4_elem *next,
}
#define MTYPE hash_netport4
-#define PF 4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
@@ -182,8 +181,11 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -194,10 +196,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
e.cidr = cidr - 1;
}
- if (tb[IPSET_ATTR_PORT])
- e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
- else
- return -IPSET_ERR_PROTOCOL;
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -326,10 +325,10 @@ hash_netport6_data_list(struct sk_buff *skb,
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -340,11 +339,9 @@ hash_netport6_data_next(struct hash_netport4_elem *next,
}
#undef MTYPE
-#undef PF
#undef HOST_MASK
#define MTYPE hash_netport6
-#define PF 6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
@@ -404,8 +401,11 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -417,10 +417,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
}
ip6_netmask(&e.ip, e.cidr + 1);
- if (tb[IPSET_ATTR_PORT])
- e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
- else
- return -IPSET_ERR_PROTOCOL;
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -495,7 +492,8 @@ static struct ip_set_type hash_netport_type __read_mostly = {
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index bfaa94c7baa7..1451a8ac938f 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -54,7 +54,7 @@ struct hash_netportnet4_elem {
u16 ccmp;
};
u16 padding;
- u8 nomatch:1;
+ u8 nomatch;
u8 proto;
};
@@ -124,10 +124,10 @@ hash_netportnet4_data_list(struct sk_buff *skb,
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -139,7 +139,6 @@ hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
}
#define MTYPE hash_netportnet4
-#define PF 4
#define HOST_MASK 32
#include "ip_set_hash_gen.h"
@@ -200,9 +199,15 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
- ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -220,10 +225,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
e.cidr[1] = cidr;
}
- if (tb[IPSET_ATTR_PORT])
- e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
- else
- return -IPSET_ERR_PROTOCOL;
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -326,7 +328,7 @@ struct hash_netportnet6_elem {
u16 ccmp;
};
u16 padding;
- u8 nomatch:1;
+ u8 nomatch;
u8 proto;
};
@@ -397,10 +399,10 @@ hash_netportnet6_data_list(struct sk_buff *skb,
(flags &&
nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
goto nla_put_failure;
- return 0;
+ return false;
nla_put_failure:
- return 1;
+ return true;
}
static inline void
@@ -411,11 +413,9 @@ hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
}
#undef MTYPE
-#undef PF
#undef HOST_MASK
#define MTYPE hash_netportnet6
-#define PF 6
#define HOST_MASK 128
#define IP_SET_EMIT_CREATE
#include "ip_set_hash_gen.h"
@@ -477,9 +477,15 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_LINENO])
*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
- ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
- ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
- ip_set_get_extensions(set, tb, &ext);
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_extensions(set, tb, &ext);
if (ret)
return ret;
@@ -496,10 +502,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
ip6_netmask(&e.ip[0], e.cidr[0]);
ip6_netmask(&e.ip[1], e.cidr[1]);
- if (tb[IPSET_ATTR_PORT])
- e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
- else
- return -IPSET_ERR_PROTOCOL;
+ e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
if (tb[IPSET_ATTR_PROTO]) {
e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -577,7 +580,8 @@ static struct ip_set_type hash_netportnet_type __read_mostly = {
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index f8f682806e36..5bd3b1eae3fa 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -678,7 +678,8 @@ static struct ip_set_type list_set_type __read_mostly = {
[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
- [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAX_COMMENT_SIZE },
[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
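
The `.len = IPSET_MAX_COMMENT_SIZE` addition seen in each policy above bounds the attribute at parse time; for NLA_NUL_STRING, .len is the maximum string length (the validator permits one extra byte for the terminating NUL), so oversized comments are rejected before any set code touches them. A hedged sketch of the same idiom in a standalone kernel policy, with illustrative attribute names:

#include <net/netlink.h>

enum { EXAMPLE_ATTR_UNSPEC, EXAMPLE_ATTR_COMMENT, __EXAMPLE_ATTR_MAX };
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

/* A NUL-terminated string capped at 255 characters. Without .len,
 * nla_parse() would accept arbitrarily long payloads and the bound
 * would have to be enforced (or forgotten) later. */
static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	[EXAMPLE_ATTR_COMMENT] = { .type = NLA_NUL_STRING, .len = 255 },
};
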
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 19b9cce6c210..b08ba9538d12 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1457,18 +1457,12 @@ static struct socket *make_send_sock(struct net *net, int id)
struct socket *sock;
int result;
- /* First create a socket move it to right name space later */
- result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ /* First create a socket */
+ result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
}
- /*
- * Kernel sockets that are a part of a namespace, should not
- * hold a reference to a namespace in order to allow to stop it.
- * After sk_change_net should be released using sk_release_kernel.
- */
- sk_change_net(sock->sk, net);
result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
if (result < 0) {
pr_err("Error setting outbound mcast interface\n");
@@ -1497,7 +1491,7 @@ static struct socket *make_send_sock(struct net *net, int id)
return sock;
error:
- sk_release_kernel(sock->sk);
+ sock_release(sock);
return ERR_PTR(result);
}
@@ -1518,17 +1512,11 @@ static struct socket *make_receive_sock(struct net *net, int id)
int result;
/* First create a socket */
- result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
}
- /*
- * Kernel sockets that are a part of a namespace, should not
- * hold a reference to a namespace in order to allow to stop it.
- * After sk_change_net should be released using sk_release_kernel.
- */
- sk_change_net(sock->sk, net);
/* it is equivalent to the REUSEADDR option in user-space */
sock->sk->sk_reuse = SK_CAN_REUSE;
result = sysctl_sync_sock_size(ipvs);
@@ -1554,7 +1542,7 @@ static struct socket *make_receive_sock(struct net *net, int id)
return sock;
error:
- sk_release_kernel(sock->sk);
+ sock_release(sock);
return ERR_PTR(result);
}
@@ -1692,7 +1680,7 @@ done:
ip_vs_sync_buff_release(sb);
/* release the sending multicast socket */
- sk_release_kernel(tinfo->sock->sk);
+ sock_release(tinfo->sock);
kfree(tinfo);
return 0;
@@ -1729,7 +1717,7 @@ static int sync_thread_backup(void *data)
}
/* release the sending multicast socket */
- sk_release_kernel(tinfo->sock->sk);
+ sock_release(tinfo->sock);
kfree(tinfo->buf);
kfree(tinfo);
@@ -1854,11 +1842,11 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
return 0;
outsocket:
- sk_release_kernel(sock->sk);
+ sock_release(sock);
outtinfo:
if (tinfo) {
- sk_release_kernel(tinfo->sock->sk);
+ sock_release(tinfo->sock);
kfree(tinfo->buf);
kfree(tinfo);
}
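
The ipvs sync changes follow the tree-wide kernel-socket rework visible throughout this merge: sock_create_kern() now takes the target struct net directly, so the create-in-init_net-then-sk_change_net() dance disappears, and teardown becomes a plain sock_release() instead of sk_release_kernel(). A minimal sketch of the resulting lifecycle, assuming a kernel with the five-argument sock_create_kern():

/* Kernel-context sketch: a per-netns UDP kernel socket. */
static struct socket *make_kernel_udp_sock(struct net *net)
{
	struct socket *sock;
	int err;

	/* Created directly in @net; no sk_change_net() step needed. */
	err = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
	if (err < 0)
		return ERR_PTR(err);
	return sock;
}
/* ...and later: sock_release(sock); -- not sk_release_kernel(sock->sk). */
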
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 19986ec5f21a..bf66a8657a5f 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -364,13 +364,16 @@ err_unreach:
#ifdef CONFIG_IP_VS_IPV6
static struct dst_entry *
__ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
- struct in6_addr *ret_saddr, int do_xfrm)
+ struct in6_addr *ret_saddr, int do_xfrm, int rt_mode)
{
struct dst_entry *dst;
struct flowi6 fl6 = {
.daddr = *daddr,
};
+ if (rt_mode & IP_VS_RT_MODE_KNOWN_NH)
+ fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
+
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error)
goto out_err;
@@ -427,7 +430,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
}
dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
&dest_dst->dst_saddr.in6,
- do_xfrm);
+ do_xfrm, rt_mode);
if (!dst) {
__ip_vs_dst_set(dest, NULL, NULL, 0);
spin_unlock_bh(&dest->dst_lock);
@@ -435,7 +438,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
goto err_unreach;
}
rt = (struct rt6_info *) dst;
- cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ cookie = rt6_get_cookie(rt);
__ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
spin_unlock_bh(&dest->dst_lock);
IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
@@ -446,7 +449,8 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
*ret_saddr = dest_dst->dst_saddr.in6;
} else {
noref = 0;
- dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
+ dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm,
+ rt_mode);
if (!dst)
goto err_unreach;
rt = (struct rt6_info *) dst;
@@ -781,7 +785,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* From world but DNAT to loopback address? */
if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
- ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
+ ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
"ip_vs_nat_xmit_v6(): "
"stopping DNAT to loopback address");
@@ -1164,7 +1168,8 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
NULL, ipvsh, 0,
IP_VS_RT_MODE_LOCAL |
- IP_VS_RT_MODE_NON_LOCAL);
+ IP_VS_RT_MODE_NON_LOCAL |
+ IP_VS_RT_MODE_KNOWN_NH);
if (local < 0)
goto tx_error;
if (local) {
@@ -1346,7 +1351,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* From world but DNAT to loopback address? */
if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
- ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
+ ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
IP_VS_DBG(1, "%s(): "
"stopping DNAT to loopback %pI6\n",
__func__, &cp->daddr.in6);
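
Two independent fixes share this file: __ip_vs_route_output_v6() now receives rt_mode so direct-routing mode can request FLOWI_FLAG_KNOWN_NH (a route whose nexthop is the destination itself rather than a gateway, which DR mode needs for real servers reached without address rewriting), and the loopback-DNAT checks compare against cp->daddr rather than rt->rt6i_dst.addr, which names the route prefix, not the actual destination. The flag usage in isolation, as a kernel-context sketch:

/* Sketch: ask the IPv6 FIB to treat @daddr itself as the nexthop
 * when the caller (DR mode) knows no gateway hop should be taken. */
static struct dst_entry *route_v6_known_nh(struct net *net,
					   const struct in6_addr *daddr,
					   int rt_mode)
{
	struct flowi6 fl6 = { .daddr = *daddr };

	if (rt_mode & IP_VS_RT_MODE_KNOWN_NH)
		fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;

	return ip6_route_output(net, NULL, &fl6);
}
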
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 1d69f5b9748f..9511af04dc81 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -779,8 +779,8 @@ static int callforward_do_filter(struct net *net,
flowi6_to_flowi(&fl1), false)) {
if (!afinfo->route(net, (struct dst_entry **)&rt2,
flowi6_to_flowi(&fl2), false)) {
- if (ipv6_addr_equal(rt6_nexthop(rt1),
- rt6_nexthop(rt2)) &&
+ if (ipv6_addr_equal(rt6_nexthop(rt1, &fl1.daddr),
+ rt6_nexthop(rt2, &fl2.daddr)) &&
rt1->dst.dev == rt2->dst.dev)
ret = 1;
dst_release(&rt2->dst);
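
rt6_nexthop() grew a daddr argument in this cycle: it returns the route's gateway when one exists and otherwise falls back to the destination passed in, so callers like this H.323 helper no longer need their own gateway-or-not handling. Conceptually (a standalone restatement, not the kernel implementation):

/* Conceptual sketch only; hypothetical helper. */
static const struct in6_addr *
nexthop6(int has_gateway, const struct in6_addr *gw,
	 const struct in6_addr *daddr)
{
	return has_gateway ? gw : daddr;	/* fall back to the destination */
}
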
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 34ded09317e7..4528f122bcd2 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -399,6 +399,8 @@ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
[NFTA_TABLE_NAME] = { .type = NLA_STRING,
.len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_TABLE_FLAGS] = { .type = NLA_U32 },
+ [NFTA_TABLE_DEV] = { .type = NLA_STRING,
+ .len = IFNAMSIZ - 1 },
};
static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
@@ -423,6 +425,10 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)))
goto nla_put_failure;
+ if (table->dev &&
+ nla_put_string(skb, NFTA_TABLE_DEV, table->dev->name))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
@@ -608,6 +614,11 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
if (flags == ctx->table->flags)
return 0;
+ if ((ctx->afi->flags & NFT_AF_NEEDS_DEV) &&
+ ctx->nla[NFTA_TABLE_DEV] &&
+ nla_strcmp(ctx->nla[NFTA_TABLE_DEV], ctx->table->dev->name))
+ return -EOPNOTSUPP;
+
trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
sizeof(struct nft_trans_table));
if (trans == NULL)
@@ -645,6 +656,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
struct nft_table *table;
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
+ struct net_device *dev = NULL;
u32 flags = 0;
struct nft_ctx ctx;
int err;
@@ -679,30 +691,50 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
return -EINVAL;
}
+ if (afi->flags & NFT_AF_NEEDS_DEV) {
+ char ifname[IFNAMSIZ];
+
+ if (!nla[NFTA_TABLE_DEV])
+ return -EOPNOTSUPP;
+
+ nla_strlcpy(ifname, nla[NFTA_TABLE_DEV], IFNAMSIZ);
+ dev = dev_get_by_name(net, ifname);
+ if (!dev)
+ return -ENOENT;
+ } else if (nla[NFTA_TABLE_DEV]) {
+ return -EOPNOTSUPP;
+ }
+
+ err = -EAFNOSUPPORT;
if (!try_module_get(afi->owner))
- return -EAFNOSUPPORT;
+ goto err1;
err = -ENOMEM;
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (table == NULL)
- goto err1;
+ goto err2;
nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN);
INIT_LIST_HEAD(&table->chains);
INIT_LIST_HEAD(&table->sets);
table->flags = flags;
+ table->dev = dev;
nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
if (err < 0)
- goto err2;
+ goto err3;
list_add_tail_rcu(&table->list, &afi->tables);
return 0;
-err2:
+err3:
kfree(table);
-err1:
+err2:
module_put(afi->owner);
+err1:
+ if (dev != NULL)
+ dev_put(dev);
+
return err;
}
@@ -806,6 +838,9 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
{
BUG_ON(ctx->table->use > 0);
+ if (ctx->table->dev)
+ dev_put(ctx->table->dev);
+
kfree(ctx->table);
module_put(ctx->afi->owner);
}
@@ -1361,6 +1396,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
ops->priority = priority;
ops->priv = chain;
ops->hook = afi->hooks[ops->hooknum];
+ ops->dev = table->dev;
if (hookfn)
ops->hook = hookfn;
if (afi->hook_ops_init)
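
The newtable path above acquires three resources in order (the net_device reference, the AF module reference, the table allocation), which is why the error labels are renumbered: each label unwinds exactly the steps taken so far, in reverse. Updtable additionally rejects any attempt to change the bound device with -EOPNOTSUPP, and nf_tables_table_destroy() drops the device reference at teardown. A sketch of the unwind idiom with descriptive label names (the diff itself uses err1/err2/err3):

/* Kernel-context sketch: acquire in order, release in reverse. */
static int example_acquire(struct net *net, struct nft_af_info *afi,
			   const char *ifname)
{
	struct net_device *dev;
	struct nft_table *table;
	int err;

	dev = dev_get_by_name(net, ifname);		/* 1st */
	if (!dev)
		return -ENOENT;

	err = -EAFNOSUPPORT;
	if (!try_module_get(afi->owner))		/* 2nd */
		goto err_put_dev;

	err = -ENOMEM;
	table = kzalloc(sizeof(*table), GFP_KERNEL);	/* 3rd */
	if (!table)
		goto err_put_module;

	/* on success, dev/module/table are handed off to the caller */
	return 0;

err_put_module:
	module_put(afi->owner);
err_put_dev:
	dev_put(dev);
	return err;
}
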
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c
new file mode 100644
index 000000000000..04cb17057f46
--- /dev/null
+++ b/net/netfilter/nf_tables_netdev.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <net/netfilter/nf_tables.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+
+static inline void
+nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct iphdr *iph, _iph;
+ u32 len, thoff;
+
+ nft_set_pktinfo(pkt, ops, skb, state);
+
+ iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
+ &_iph);
+ if (!iph)
+ return;
+
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ return;
+
+ len = ntohs(iph->tot_len);
+ thoff = iph->ihl * 4;
+ if (skb->len < len)
+ return;
+ else if (len < thoff)
+ return;
+
+ pkt->tprot = iph->protocol;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+}
+
+static inline void
+__nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ip6h, _ip6h;
+ unsigned int thoff = 0;
+ unsigned short frag_off;
+ int protohdr;
+ u32 pkt_len;
+
+ ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
+ &_ip6h);
+ if (!ip6h)
+ return;
+
+ if (ip6h->version != 6)
+ return;
+
+ pkt_len = ntohs(ip6h->payload_len);
+ if (pkt_len + sizeof(*ip6h) > skb->len)
+ return;
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+ if (protohdr < 0)
+ return;
+
+ pkt->tprot = protohdr;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = frag_off;
+#endif
+}
+
+static inline void nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+ const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, ops, skb, state);
+ __nft_netdev_set_pktinfo_ipv6(pkt, ops, skb, state);
+}
+
+static unsigned int
+nft_do_chain_netdev(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nft_pktinfo pkt;
+
+ switch (eth_hdr(skb)->h_proto) {
+ case htons(ETH_P_IP):
+ nft_netdev_set_pktinfo_ipv4(&pkt, ops, skb, state);
+ break;
+ case htons(ETH_P_IPV6):
+ nft_netdev_set_pktinfo_ipv6(&pkt, ops, skb, state);
+ break;
+ default:
+ nft_set_pktinfo(&pkt, ops, skb, state);
+ break;
+ }
+
+ return nft_do_chain(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_netdev __read_mostly = {
+ .family = NFPROTO_NETDEV,
+ .nhooks = NF_NETDEV_NUMHOOKS,
+ .owner = THIS_MODULE,
+ .flags = NFT_AF_NEEDS_DEV,
+ .nops = 1,
+ .hooks = {
+ [NF_NETDEV_INGRESS] = nft_do_chain_netdev,
+ },
+};
+
+static int nf_tables_netdev_init_net(struct net *net)
+{
+ net->nft.netdev = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+ if (net->nft.netdev == NULL)
+ return -ENOMEM;
+
+ memcpy(net->nft.netdev, &nft_af_netdev, sizeof(nft_af_netdev));
+
+ if (nft_register_afinfo(net, net->nft.netdev) < 0)
+ goto err;
+
+ return 0;
+err:
+ kfree(net->nft.netdev);
+ return -ENOMEM;
+}
+
+static void nf_tables_netdev_exit_net(struct net *net)
+{
+ nft_unregister_afinfo(net->nft.netdev);
+ kfree(net->nft.netdev);
+}
+
+static struct pernet_operations nf_tables_netdev_net_ops = {
+ .init = nf_tables_netdev_init_net,
+ .exit = nf_tables_netdev_exit_net,
+};
+
+static const struct nf_chain_type nft_filter_chain_netdev = {
+ .name = "filter",
+ .type = NFT_CHAIN_T_DEFAULT,
+ .family = NFPROTO_NETDEV,
+ .owner = THIS_MODULE,
+ .hook_mask = (1 << NF_NETDEV_INGRESS),
+};
+
+static int __init nf_tables_netdev_init(void)
+{
+ int ret;
+
+ nft_register_chain_type(&nft_filter_chain_netdev);
+ ret = register_pernet_subsys(&nf_tables_netdev_net_ops);
+ if (ret < 0)
+ nft_unregister_chain_type(&nft_filter_chain_netdev);
+
+ return ret;
+}
+
+static void __exit nf_tables_netdev_exit(void)
+{
+ unregister_pernet_subsys(&nf_tables_netdev_net_ops);
+ nft_unregister_chain_type(&nft_filter_chain_netdev);
+}
+
+module_init(nf_tables_netdev_init);
+module_exit(nf_tables_netdev_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_FAMILY(5); /* NFPROTO_NETDEV */
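
The new netdev family attaches tables to a single device at NF_NETDEV_INGRESS and must sanity-check the network header itself before nft_do_chain() runs, since at ingress nothing upstream has validated it yet. The IPv4 gates in nft_netdev_set_pktinfo_ipv4() boil down to the predicate below, restated as standalone C (illustrative types, not kernel code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>

struct iphdr_min { uint8_t vihl; uint8_t tos; uint16_t tot_len; };

/* True iff the header passes the same gates the hook applies before
 * trusting ihl/tot_len: version 4, ihl >= 5 words, and a claimed total
 * length that neither exceeds the buffer nor undercuts the header. */
static bool ipv4_header_sane(const struct iphdr_min *h, size_t buflen)
{
	uint8_t version = h->vihl >> 4;
	uint8_t ihl = h->vihl & 0x0f;
	uint16_t tot_len = ntohs(h->tot_len);

	return version == 4 && ihl >= 5 &&
	       tot_len <= buflen && tot_len >= (size_t)ihl * 4;
}
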
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 11c7682fa0ea..22a5ac76683e 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -1257,7 +1257,7 @@ static int seq_show(struct seq_file *s, void *v)
inst->copy_mode, inst->copy_range,
inst->queue_dropped, inst->queue_user_dropped,
inst->id_sequence, 1);
- return seq_has_overflowed(s);
+ return 0;
}
static const struct seq_operations nfqnl_seq_ops = {
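
Returning seq_has_overflowed() from a ->show() callback was only ever a transitional idiom: the seq_file core detects overflow itself and retries with a larger buffer, so show functions should print and return 0 unconditionally. The same simplification repeats in x_tables.c below. The resulting shape, as a kernel-context sketch:

/* Idiomatic seq_file show() after this cleanup: print and return 0;
 * seq_read() notices overflow and re-invokes with a bigger buffer. */
static int example_seq_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%s\n", (const char *)v);
	return 0;	/* not seq_has_overflowed(s) */
}
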
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 7f29cfc76349..66def315eb56 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -161,6 +161,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
par->hook_mask = 0;
}
par->family = ctx->afi->family;
+ par->nft_compat = true;
}
static void target_compat_from_user(struct xt_target *t, void *in, void *out)
@@ -377,6 +378,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
par->hook_mask = 0;
}
par->family = ctx->afi->family;
+ par->nft_compat = true;
}
static void match_compat_from_user(struct xt_match *m, void *in, void *out)
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 51a459c3c649..83032464a4bd 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -947,11 +947,9 @@ static int xt_table_seq_show(struct seq_file *seq, void *v)
{
struct xt_table *table = list_entry(v, struct xt_table, list);
- if (strlen(table->name)) {
+ if (*table->name)
seq_printf(seq, "%s\n", table->name);
- return seq_has_overflowed(seq);
- } else
- return 0;
+ return 0;
}
static const struct seq_operations xt_table_seq_ops = {
@@ -1087,10 +1085,8 @@ static int xt_match_seq_show(struct seq_file *seq, void *v)
if (trav->curr == trav->head)
return 0;
match = list_entry(trav->curr, struct xt_match, list);
- if (*match->name == '\0')
- return 0;
- seq_printf(seq, "%s\n", match->name);
- return seq_has_overflowed(seq);
+ if (*match->name)
+ seq_printf(seq, "%s\n", match->name);
}
return 0;
}
@@ -1142,10 +1138,8 @@ static int xt_target_seq_show(struct seq_file *seq, void *v)
if (trav->curr == trav->head)
return 0;
target = list_entry(trav->curr, struct xt_target, list);
- if (*target->name == '\0')
- return 0;
- seq_printf(seq, "%s\n", target->name);
- return seq_has_overflowed(seq);
+ if (*target->name)
+ seq_printf(seq, "%s\n", target->name);
}
return 0;
}
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index e762de5ee89b..8c3190e2fc6a 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -277,6 +277,9 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
"FORWARD, OUTPUT and POSTROUTING hooks\n");
return -EINVAL;
}
+ if (par->nft_compat)
+ return 0;
+
xt_ematch_foreach(ematch, e)
if (find_syn_match(ematch))
return 0;
@@ -299,6 +302,9 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
"FORWARD, OUTPUT and POSTROUTING hooks\n");
return -EINVAL;
}
+ if (par->nft_compat)
+ return 0;
+
xt_ematch_foreach(ematch, e)
if (find_syn_match(ematch))
return 0;
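
par->nft_compat, set in nft_compat.c above, lets xtables extensions skip checks that only make sense for genuine ip(6)tables rules. TCPMSS's checkentry normally walks the rule's bundled matches looking for a SYN/tcp-flags match, but a rule created through nft_compat carries no xt_entry_match chain to walk, so that validation is waived. The pattern in isolation, with a hypothetical target name:

/* Kernel-context sketch: bail out of xt-specific rule introspection
 * when the entry was created through nft_compat, not ip(6)tables. */
static int example_tg_check(const struct xt_tgchk_param *par)
{
	if (par->nft_compat)
		return 0;	/* no bundled matches to inspect */

	/* legacy path: scan the rule's matches for --syn here and
	 * return 0 when found; reject otherwise. */
	return -EINVAL;
}
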
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 292934d23482..a747eb475b68 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -152,6 +152,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
fl6.daddr = info->gw.in6;
fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
(iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
+ fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
dst_release(dst);
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index fab6eea1bf38..5b4743cc0436 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -73,7 +73,7 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
if (dev == NULL && rt->rt6i_flags & RTF_LOCAL)
ret |= XT_ADDRTYPE_LOCAL;
- if (rt->rt6i_flags & RTF_ANYCAST)
+ if (ipv6_anycast_destination((struct dst_entry *)rt, addr))
ret |= XT_ADDRTYPE_ANYCAST;
dst_release(&rt->dst);
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
index 23345238711b..ebd41dc501e5 100644
--- a/net/netfilter/xt_mark.c
+++ b/net/netfilter/xt_mark.c
@@ -23,6 +23,7 @@ MODULE_ALIAS("ipt_mark");
MODULE_ALIAS("ip6t_mark");
MODULE_ALIAS("ipt_MARK");
MODULE_ALIAS("ip6t_MARK");
+MODULE_ALIAS("arpt_MARK");
static unsigned int
mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 89045982ec94..b103e9627716 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -15,8 +15,9 @@
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <uapi/linux/netfilter/xt_set.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index bf6e76643f78..69d67c300b80 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -76,17 +76,18 @@ struct listeners {
};
/* state bits */
-#define NETLINK_CONGESTED 0x0
+#define NETLINK_S_CONGESTED 0x0
/* flags */
-#define NETLINK_KERNEL_SOCKET 0x1
-#define NETLINK_RECV_PKTINFO 0x2
-#define NETLINK_BROADCAST_SEND_ERROR 0x4
-#define NETLINK_RECV_NO_ENOBUFS 0x8
+#define NETLINK_F_KERNEL_SOCKET 0x1
+#define NETLINK_F_RECV_PKTINFO 0x2
+#define NETLINK_F_BROADCAST_SEND_ERROR 0x4
+#define NETLINK_F_RECV_NO_ENOBUFS 0x8
+#define NETLINK_F_LISTEN_ALL_NSID 0x10
static inline int netlink_is_kernel(struct sock *sk)
{
- return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
+ return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}
struct netlink_table *nl_table __read_mostly;
@@ -256,8 +257,9 @@ static void netlink_overrun(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
- if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
- if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
+ if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
+ if (!test_and_set_bit(NETLINK_S_CONGESTED,
+ &nlk_sk(sk)->state)) {
sk->sk_err = ENOBUFS;
sk->sk_error_report(sk);
}
@@ -270,8 +272,8 @@ static void netlink_rcv_wake(struct sock *sk)
struct netlink_sock *nlk = nlk_sk(sk);
if (skb_queue_empty(&sk->sk_receive_queue))
- clear_bit(NETLINK_CONGESTED, &nlk->state);
- if (!test_bit(NETLINK_CONGESTED, &nlk->state))
+ clear_bit(NETLINK_S_CONGESTED, &nlk->state);
+ if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
wake_up_interruptible(&nlk->wait);
}
@@ -1118,14 +1120,15 @@ static struct proto netlink_proto = {
};
static int __netlink_create(struct net *net, struct socket *sock,
- struct mutex *cb_mutex, int protocol)
+ struct mutex *cb_mutex, int protocol,
+ int kern)
{
struct sock *sk;
struct netlink_sock *nlk;
sock->ops = &netlink_ops;
- sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
+ sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
if (!sk)
return -ENOMEM;
@@ -1187,7 +1190,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
if (err < 0)
goto out;
- err = __netlink_create(net, sock, cb_mutex, protocol);
+ err = __netlink_create(net, sock, cb_mutex, protocol, kern);
if (err < 0)
goto out_module;
@@ -1297,20 +1300,24 @@ static int netlink_autobind(struct socket *sock)
struct netlink_table *table = &nl_table[sk->sk_protocol];
s32 portid = task_tgid_vnr(current);
int err;
- static s32 rover = -4097;
+ s32 rover = -4096;
+ bool ok;
retry:
cond_resched();
rcu_read_lock();
- if (__netlink_lookup(table, portid, net)) {
+ ok = !__netlink_lookup(table, portid, net);
+ rcu_read_unlock();
+ if (!ok) {
/* Bind collision, search negative portid values. */
- portid = rover--;
- if (rover > -4097)
+ if (rover == -4096)
+ /* rover will be in range [S32_MIN, -4097] */
+ rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
+ else if (rover >= -4096)
rover = -4097;
- rcu_read_unlock();
+ portid = rover--;
goto retry;
}
- rcu_read_unlock();
err = netlink_insert(sk, portid);
if (err == -EADDRINUSE)
@@ -1657,7 +1664,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
nlk = nlk_sk(sk);
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
- test_bit(NETLINK_CONGESTED, &nlk->state)) &&
+ test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
!netlink_skb_is_mmaped(skb)) {
DECLARE_WAITQUEUE(wait, current);
if (!*timeo) {
@@ -1672,7 +1679,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
add_wait_queue(&nlk->wait, &wait);
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
- test_bit(NETLINK_CONGESTED, &nlk->state)) &&
+ test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
!sock_flag(sk, SOCK_DEAD))
*timeo = schedule_timeout(*timeo);
@@ -1896,7 +1903,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
struct netlink_sock *nlk = nlk_sk(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
- !test_bit(NETLINK_CONGESTED, &nlk->state)) {
+ !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
netlink_skb_set_owner_r(skb, sk);
__netlink_sendskb(sk, skb);
return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
@@ -1932,8 +1939,17 @@ static void do_one_broadcast(struct sock *sk,
!test_bit(p->group - 1, nlk->groups))
return;
- if (!net_eq(sock_net(sk), p->net))
- return;
+ if (!net_eq(sock_net(sk), p->net)) {
+ if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
+ return;
+
+ if (!peernet_has_id(sock_net(sk), p->net))
+ return;
+
+ if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
+ CAP_NET_BROADCAST))
+ return;
+ }
if (p->failure) {
netlink_overrun(sk);
@@ -1957,23 +1973,33 @@ static void do_one_broadcast(struct sock *sk,
netlink_overrun(sk);
/* Clone failed. Notify ALL listeners. */
p->failure = 1;
- if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
+ if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
p->delivery_failure = 1;
- } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
+ goto out;
+ }
+ if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
kfree_skb(p->skb2);
p->skb2 = NULL;
- } else if (sk_filter(sk, p->skb2)) {
+ goto out;
+ }
+ if (sk_filter(sk, p->skb2)) {
kfree_skb(p->skb2);
p->skb2 = NULL;
- } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
+ goto out;
+ }
+ NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
+ NETLINK_CB(p->skb2).nsid_is_set = true;
+ val = netlink_broadcast_deliver(sk, p->skb2);
+ if (val < 0) {
netlink_overrun(sk);
- if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
+ if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
p->delivery_failure = 1;
} else {
p->congested |= val;
p->delivered = 1;
p->skb2 = NULL;
}
+out:
sock_put(sk);
}
@@ -2058,7 +2084,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
!test_bit(p->group - 1, nlk->groups))
goto out;
- if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
+ if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
ret = 1;
goto out;
}
@@ -2077,7 +2103,7 @@ out:
* @code: error code, must be negative (as usual in kernelspace)
*
* This function returns the number of broadcast listeners that have set the
- * NETLINK_RECV_NO_ENOBUFS socket option.
+ * NETLINK_NO_ENOBUFS socket option.
*/
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
@@ -2137,9 +2163,9 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case NETLINK_PKTINFO:
if (val)
- nlk->flags |= NETLINK_RECV_PKTINFO;
+ nlk->flags |= NETLINK_F_RECV_PKTINFO;
else
- nlk->flags &= ~NETLINK_RECV_PKTINFO;
+ nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
err = 0;
break;
case NETLINK_ADD_MEMBERSHIP:
@@ -2168,18 +2194,18 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
}
case NETLINK_BROADCAST_ERROR:
if (val)
- nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
+ nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
else
- nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
+ nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
err = 0;
break;
case NETLINK_NO_ENOBUFS:
if (val) {
- nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
- clear_bit(NETLINK_CONGESTED, &nlk->state);
+ nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
+ clear_bit(NETLINK_S_CONGESTED, &nlk->state);
wake_up_interruptible(&nlk->wait);
} else {
- nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
+ nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
}
err = 0;
break;
@@ -2202,6 +2228,16 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
break;
}
#endif /* CONFIG_NETLINK_MMAP */
+ case NETLINK_LISTEN_ALL_NSID:
+ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
+ return -EPERM;
+
+ if (val)
+ nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
+ else
+ nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
+ err = 0;
+ break;
default:
err = -ENOPROTOOPT;
}
@@ -2228,7 +2264,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
- val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
+ val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
if (put_user(len, optlen) ||
put_user(val, optval))
return -EFAULT;
@@ -2238,7 +2274,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
- val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
+ val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
if (put_user(len, optlen) ||
put_user(val, optval))
return -EFAULT;
@@ -2248,7 +2284,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
- val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
+ val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
if (put_user(len, optlen) ||
put_user(val, optval))
return -EFAULT;
@@ -2268,6 +2304,16 @@ static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
+static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb)
+{
+ if (!NETLINK_CB(skb).nsid_is_set)
+ return;
+
+ put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
+ &NETLINK_CB(skb).nsid);
+}
+
static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
@@ -2419,8 +2465,10 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
msg->msg_namelen = sizeof(*addr);
}
- if (nlk->flags & NETLINK_RECV_PKTINFO)
+ if (nlk->flags & NETLINK_F_RECV_PKTINFO)
netlink_cmsg_recv_pktinfo(msg, skb);
+ if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
+ netlink_cmsg_listen_all_nsid(sk, msg, skb);
memset(&scm, 0, sizeof(scm));
scm.creds = *NETLINK_CREDS(skb);
@@ -2474,17 +2522,10 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
return NULL;
- /*
- * We have to just have a reference on the net from sk, but don't
- * get_net it. Besides, we cannot get and then put the net here.
- * So we create one inside init_net and the move it to net.
- */
-
- if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
+ if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
goto out_sock_release_nosk;
sk = sock->sk;
- sk_change_net(sk, net);
if (!cfg || cfg->groups < 32)
groups = 32;
@@ -2503,7 +2544,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
goto out_sock_release;
nlk = nlk_sk(sk);
- nlk->flags |= NETLINK_KERNEL_SOCKET;
+ nlk->flags |= NETLINK_F_KERNEL_SOCKET;
netlink_table_grab();
if (!nl_table[unit].registered) {
@@ -2540,7 +2581,10 @@ EXPORT_SYMBOL(__netlink_kernel_create);
void
netlink_kernel_release(struct sock *sk)
{
- sk_release_kernel(sk);
+ if (sk == NULL || sk->sk_socket == NULL)
+ return;
+
+ sock_release(sk->sk_socket);
}
EXPORT_SYMBOL(netlink_kernel_release);
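
Three separate netlink changes land here: the internal flag and state constants gain NETLINK_F_/NETLINK_S_ prefixes so they can no longer be confused with the user-visible NETLINK_* socket options; autobind drops the racy static rover in favor of a per-call value randomized over [S32_MIN, -4097]; and the new NETLINK_LISTEN_ALL_NSID option lets a CAP_NET_BROADCAST-capable socket receive broadcasts from peer network namespaces, with the originating nsid delivered as ancillary data. The userspace view of the new option, as a sketch (the constant is 8 in the uapi header, but guarded here in case the build headers predate it):

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef NETLINK_LISTEN_ALL_NSID
#define NETLINK_LISTEN_ALL_NSID 8
#endif

/* Opt in to cross-namespace broadcasts (needs CAP_NET_BROADCAST). */
static int enable_all_nsid(int fd)
{
	int on = 1;

	return setsockopt(fd, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID,
			  &on, sizeof(on));
}

/* Pull the sender's nsid out of the control messages; -1 if absent. */
static int peer_nsid(struct msghdr *msg)
{
	struct cmsghdr *cm;
	int nsid = -1;

	for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm))
		if (cm->cmsg_level == SOL_NETLINK &&
		    cm->cmsg_type == NETLINK_LISTEN_ALL_NSID)
			memcpy(&nsid, CMSG_DATA(cm), sizeof(nsid));
	return nsid;
}
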
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index b987fd56c3c5..ed212ffc1d9d 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -433,7 +433,7 @@ static int nr_create(struct net *net, struct socket *sock, int protocol,
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto);
+ sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, kern);
if (sk == NULL)
return -ENOMEM;
@@ -476,7 +476,7 @@ static struct sock *nr_make_new(struct sock *osk)
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
- sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot);
+ sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot, 0);
if (sk == NULL)
return NULL;
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
index 2277276f52bc..54e40fa47822 100644
--- a/net/nfc/af_nfc.c
+++ b/net/nfc/af_nfc.c
@@ -40,7 +40,7 @@ static int nfc_sock_create(struct net *net, struct socket *sock, int proto,
read_lock(&proto_tab_lock);
if (proto_tab[proto] && try_module_get(proto_tab[proto]->owner)) {
- rc = proto_tab[proto]->create(net, sock, proto_tab[proto]);
+ rc = proto_tab[proto]->create(net, sock, proto_tab[proto], kern);
module_put(proto_tab[proto]->owner);
}
read_unlock(&proto_tab_lock);
diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h
index de1789e3cc82..1f68724d44d3 100644
--- a/net/nfc/llcp.h
+++ b/net/nfc/llcp.h
@@ -225,7 +225,7 @@ void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
struct sk_buff *skb, u8 direction);
/* Sock API */
-struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp, int kern);
void nfc_llcp_sock_free(struct nfc_llcp_sock *sock);
void nfc_llcp_accept_unlink(struct sock *sk);
void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk);
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index b18f07ccb504..98876274a1ee 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -934,7 +934,7 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
sock->ssap = ssap;
}
- new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC);
+ new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC, 0);
if (new_sk == NULL) {
reason = LLCP_DM_REJ;
release_sock(&sock->sk);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index 9578bd6a4f3e..b7de0da46acd 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -942,12 +942,12 @@ static void llcp_sock_destruct(struct sock *sk)
}
}
-struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp, int kern)
{
struct sock *sk;
struct nfc_llcp_sock *llcp_sock;
- sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto);
+ sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto, kern);
if (!sk)
return NULL;
@@ -993,7 +993,7 @@ void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
}
static int llcp_sock_create(struct net *net, struct socket *sock,
- const struct nfc_protocol *nfc_proto)
+ const struct nfc_protocol *nfc_proto, int kern)
{
struct sock *sk;
@@ -1009,7 +1009,7 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
else
sock->ops = &llcp_sock_ops;
- sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC);
+ sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern);
if (sk == NULL)
return -ENOMEM;
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index a8ce80b47720..5c93e8412a26 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -30,7 +30,7 @@ struct nfc_protocol {
struct proto *proto;
struct module *owner;
int (*create)(struct net *net, struct socket *sock,
- const struct nfc_protocol *nfc_proto);
+ const struct nfc_protocol *nfc_proto, int kern);
};
struct nfc_rawsock {
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 82b4e8024778..e9a91488fe3d 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -334,7 +334,7 @@ static void rawsock_destruct(struct sock *sk)
}
static int rawsock_create(struct net *net, struct socket *sock,
- const struct nfc_protocol *nfc_proto)
+ const struct nfc_protocol *nfc_proto, int kern)
{
struct sock *sk;
@@ -348,7 +348,7 @@ static int rawsock_create(struct net *net, struct socket *sock,
else
sock->ops = &rawsock_ops;
- sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto);
+ sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
if (!sk)
return -ENOMEM;
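
The netrom and nfc changes above belong to the same series as the netlink ones: sk_alloc() now takes a kern flag, threaded down from __sock_create() through each family's create hook, so kernel-internal sockets can be distinguished at allocation time; sockets cloned for an incoming peer pass 0 and stay user-accounted. The plumbing pattern, with a hypothetical family (PF_EXAMPLE and example_proto are placeholders, not real kernel symbols):

/* Kernel-context sketch of the kern-flag plumbing. */
static int example_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	struct sock *sk;

	/* The kern flag arrives from __sock_create() untouched. */
	sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	return 0;
}
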
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index ed6b0f8dd1bb..15840401a2ce 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -59,7 +59,7 @@ config OPENVSWITCH_VXLAN
config OPENVSWITCH_GENEVE
tristate "Open vSwitch Geneve tunneling support"
depends on OPENVSWITCH
- depends on GENEVE
+ depends on GENEVE_CORE
default OPENVSWITCH
---help---
If you say Y here, then Open vSwitch will be able to create geneve vports.
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index b491c1c296fe..8a8c0b8b4f63 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -608,17 +608,16 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
- struct sw_flow_key *key, const struct nlattr *attr)
+ struct sw_flow_key *key, const struct nlattr *attr,
+ const struct nlattr *actions, int actions_len)
{
struct ovs_tunnel_info info;
struct dp_upcall_info upcall;
const struct nlattr *a;
int rem;
+ memset(&upcall, 0, sizeof(upcall));
upcall.cmd = OVS_PACKET_CMD_ACTION;
- upcall.userdata = NULL;
- upcall.portid = 0;
- upcall.egress_tun_info = NULL;
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
a = nla_next(a, &rem)) {
@@ -647,6 +646,13 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
break;
}
+ case OVS_USERSPACE_ATTR_ACTIONS: {
+ /* Include actions. */
+ upcall.actions = actions;
+ upcall.actions_len = actions_len;
+ break;
+ }
+
} /* End of switch. */
}
@@ -654,7 +660,8 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
}
static int sample(struct datapath *dp, struct sk_buff *skb,
- struct sw_flow_key *key, const struct nlattr *attr)
+ struct sw_flow_key *key, const struct nlattr *attr,
+ const struct nlattr *actions, int actions_len)
{
const struct nlattr *acts_list = NULL;
const struct nlattr *a;
@@ -688,7 +695,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
*/
if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
nla_is_last(a, rem)))
- return output_userspace(dp, skb, key, a);
+ return output_userspace(dp, skb, key, a, actions, actions_len);
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
@@ -872,7 +879,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
break;
case OVS_ACTION_ATTR_USERSPACE:
- output_userspace(dp, skb, key, a);
+ output_userspace(dp, skb, key, a, attr, len);
break;
case OVS_ACTION_ATTR_HASH:
@@ -916,7 +923,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
break;
case OVS_ACTION_ATTR_SAMPLE:
- err = sample(dp, skb, key, a);
+ err = sample(dp, skb, key, a, attr, len);
break;
}
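
output_userspace() and sample() now carry the flow's full action list so an OVS_USERSPACE_ATTR_ACTIONS request can echo it back in the upcall; and with the struct growing optional fields, initialization switches from field-by-field NULLing to a single memset. The idiom in isolation (an illustrative struct, not the real dp_upcall_info):

#include <string.h>

struct upcall_example {
	const void *userdata;
	const void *actions;	/* newly added optional field */
	int actions_len;	/* ...and its length */
	unsigned int portid;
	unsigned char cmd;
};

/* Zero everything once so future field additions can never be left
 * uninitialized at a forgotten call site; then set what is used. */
static void upcall_init(struct upcall_example *up, unsigned char cmd)
{
	memset(up, 0, sizeof(*up));
	up->cmd = cmd;
}
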
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 096c6276e6b9..ff8c4a4c1609 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -272,10 +272,9 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
struct dp_upcall_info upcall;
int error;
+ memset(&upcall, 0, sizeof(upcall));
upcall.cmd = OVS_PACKET_CMD_MISS;
- upcall.userdata = NULL;
upcall.portid = ovs_vport_find_upcall_portid(p, skb);
- upcall.egress_tun_info = NULL;
error = ovs_dp_upcall(dp, skb, key, &upcall);
if (unlikely(error))
kfree_skb(skb);
@@ -397,6 +396,10 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
if (upcall_info->egress_tun_info)
size += nla_total_size(ovs_tun_key_attr_size());
+ /* OVS_PACKET_ATTR_ACTIONS */
+ if (upcall_info->actions_len)
+ size += nla_total_size(upcall_info->actions_len);
+
return size;
}
@@ -478,6 +481,17 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
nla_nest_end(user_skb, nla);
}
+ if (upcall_info->actions_len) {
+ nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
+ err = ovs_nla_put_actions(upcall_info->actions,
+ upcall_info->actions_len,
+ user_skb);
+ if (!err)
+ nla_nest_end(user_skb, nla);
+ else
+ nla_nest_cancel(user_skb, nla);
+ }
+
/* Only reserve room for attribute header, packet data is added
* in skb_zerocopy() */
if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
@@ -545,7 +559,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
/* Normally, setting the skb 'protocol' field would be handled by a
* call to eth_type_trans(), but it assumes there's a sending
* device, which we may not have. */
- if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
+ if (eth_proto_is_802_3(eth->h_proto))
packet->protocol = eth->h_proto;
else
packet->protocol = htons(ETH_P_802_2);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 4ec4a480b147..cd691e935e08 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -116,6 +116,8 @@ struct ovs_skb_cb {
struct dp_upcall_info {
const struct ovs_tunnel_info *egress_tun_info;
const struct nlattr *userdata;
+ const struct nlattr *actions;
+ int actions_len;
u32 portid;
u8 cmd;
};
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 2dacc7b5af23..bc7b0aba994a 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -332,7 +332,7 @@ static __be16 parse_ethertype(struct sk_buff *skb)
proto = *(__be16 *) skb->data;
__skb_pull(skb, sizeof(__be16));
- if (ntohs(proto) >= ETH_P_802_3_MIN)
+ if (eth_proto_is_802_3(proto))
return proto;
if (skb->len < sizeof(struct llc_snap_hdr))
@@ -349,7 +349,7 @@ static __be16 parse_ethertype(struct sk_buff *skb)
__skb_pull(skb, sizeof(struct llc_snap_hdr));
- if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
+ if (eth_proto_is_802_3(llc->ethertype))
return llc->ethertype;
return htons(ETH_P_802_2);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index c691b1a1eee0..624e41c4267f 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -816,7 +816,7 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
if (is_mask) {
/* Always exact match EtherType. */
eth_type = htons(0xffff);
- } else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
+ } else if (!eth_proto_is_802_3(eth_type)) {
OVS_NLERR(log, "EtherType %x is less than min %x",
ntohs(eth_type), ETH_P_802_3_MIN);
return -EINVAL;
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index bf02fd5808c9..208c576bd1b6 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -46,11 +46,6 @@ static inline struct geneve_port *geneve_vport(const struct vport *vport)
return vport_priv(vport);
}
-static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
-{
- return (struct genevehdr *)(udp_hdr(skb) + 1);
-}
-
/* Convert 64 bit tunnel ID to 24 bit VNI. */
static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
{
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b5989c6ee551..fd5164139bf0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1234,27 +1234,81 @@ static void packet_free_pending(struct packet_sock *po)
free_percpu(po->tx_ring.pending_refcnt);
}
-static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
+#define ROOM_POW_OFF 2
+#define ROOM_NONE 0x0
+#define ROOM_LOW 0x1
+#define ROOM_NORMAL 0x2
+
+static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
+{
+ int idx, len;
+
+ len = po->rx_ring.frame_max + 1;
+ idx = po->rx_ring.head;
+ if (pow_off)
+ idx += len >> pow_off;
+ if (idx >= len)
+ idx -= len;
+ return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
+}
+
+static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
+{
+ int idx, len;
+
+ len = po->rx_ring.prb_bdqc.knum_blocks;
+ idx = po->rx_ring.prb_bdqc.kactive_blk_num;
+ if (pow_off)
+ idx += len >> pow_off;
+ if (idx >= len)
+ idx -= len;
+ return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
+}
+
+static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
struct sock *sk = &po->sk;
- bool has_room;
+ int ret = ROOM_NONE;
+
+ if (po->prot_hook.func != tpacket_rcv) {
+ int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
+ - (skb ? skb->truesize : 0);
+ if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
+ return ROOM_NORMAL;
+ else if (avail > 0)
+ return ROOM_LOW;
+ else
+ return ROOM_NONE;
+ }
+
+ if (po->tp_version == TPACKET_V3) {
+ if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
+ ret = ROOM_NORMAL;
+ else if (__tpacket_v3_has_room(po, 0))
+ ret = ROOM_LOW;
+ } else {
+ if (__tpacket_has_room(po, ROOM_POW_OFF))
+ ret = ROOM_NORMAL;
+ else if (__tpacket_has_room(po, 0))
+ ret = ROOM_LOW;
+ }
- if (po->prot_hook.func != tpacket_rcv)
- return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
- <= sk->sk_rcvbuf;
+ return ret;
+}
- spin_lock(&sk->sk_receive_queue.lock);
- if (po->tp_version == TPACKET_V3)
- has_room = prb_lookup_block(po, &po->rx_ring,
- po->rx_ring.prb_bdqc.kactive_blk_num,
- TP_STATUS_KERNEL);
- else
- has_room = packet_lookup_frame(po, &po->rx_ring,
- po->rx_ring.head,
- TP_STATUS_KERNEL);
- spin_unlock(&sk->sk_receive_queue.lock);
+static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
+{
+ int ret;
+ bool has_room;
+
+ spin_lock_bh(&po->sk.sk_receive_queue.lock);
+ ret = __packet_rcv_has_room(po, skb);
+ has_room = ret == ROOM_NORMAL;
+ if (po->pressure == has_room)
+ po->pressure = !has_room;
+ spin_unlock_bh(&po->sk.sk_receive_queue.lock);
- return has_room;
+ return ret;
}
static void packet_sock_destruct(struct sock *sk)
@@ -1282,6 +1336,20 @@ static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
return x;
}
+static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
+{
+ u32 rxhash;
+ int i, count = 0;
+
+ rxhash = skb_get_hash(skb);
+ for (i = 0; i < ROLLOVER_HLEN; i++)
+ if (po->rollover->history[i] == rxhash)
+ count++;
+
+ po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
+ return count > (ROLLOVER_HLEN >> 1);
+}
+
static unsigned int fanout_demux_hash(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
@@ -1318,22 +1386,40 @@ static unsigned int fanout_demux_rnd(struct packet_fanout *f,
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
struct sk_buff *skb,
- unsigned int idx, unsigned int skip,
+ unsigned int idx, bool try_self,
unsigned int num)
{
- unsigned int i, j;
+ struct packet_sock *po, *po_next, *po_skip = NULL;
+ unsigned int i, j, room = ROOM_NONE;
- i = j = min_t(int, f->next[idx], num - 1);
+ po = pkt_sk(f->arr[idx]);
+
+ if (try_self) {
+ room = packet_rcv_has_room(po, skb);
+ if (room == ROOM_NORMAL ||
+ (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
+ return idx;
+ po_skip = po;
+ }
+
+ i = j = min_t(int, po->rollover->sock, num - 1);
do {
- if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
+ po_next = pkt_sk(f->arr[i]);
+ if (po_next != po_skip && !po_next->pressure &&
+ packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
if (i != j)
- f->next[idx] = i;
+ po->rollover->sock = i;
+ atomic_long_inc(&po->rollover->num);
+ if (room == ROOM_LOW)
+ atomic_long_inc(&po->rollover->num_huge);
return i;
}
+
if (++i == num)
i = 0;
} while (i != j);
+ atomic_long_inc(&po->rollover->num_failed);
return idx;
}
@@ -1386,17 +1472,14 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
idx = fanout_demux_qm(f, skb, num);
break;
case PACKET_FANOUT_ROLLOVER:
- idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
+ idx = fanout_demux_rollover(f, skb, 0, false, num);
break;
}
- po = pkt_sk(f->arr[idx]);
- if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
- unlikely(!packet_rcv_has_room(po, skb))) {
- idx = fanout_demux_rollover(f, skb, idx, idx, num);
- po = pkt_sk(f->arr[idx]);
- }
+ if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
+ idx = fanout_demux_rollover(f, skb, idx, true, num);
+ po = pkt_sk(f->arr[idx]);
return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
@@ -1467,6 +1550,16 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
if (po->fanout)
return -EALREADY;
+ if (type == PACKET_FANOUT_ROLLOVER ||
+ (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
+ po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
+ if (!po->rollover)
+ return -ENOMEM;
+ atomic_long_set(&po->rollover->num, 0);
+ atomic_long_set(&po->rollover->num_huge, 0);
+ atomic_long_set(&po->rollover->num_failed, 0);
+ }
+
mutex_lock(&fanout_mutex);
match = NULL;
list_for_each_entry(f, &fanout_list, list) {
@@ -1515,6 +1608,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
}
out:
mutex_unlock(&fanout_mutex);
+ if (err) {
+ kfree(po->rollover);
+ po->rollover = NULL;
+ }
return err;
}
@@ -1536,6 +1633,8 @@ static void fanout_release(struct sock *sk)
kfree(f);
}
mutex_unlock(&fanout_mutex);
+
+ kfree(po->rollover);
}
static const struct proto_ops packet_ops;
@@ -2835,7 +2934,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
sock->state = SS_UNCONNECTED;
err = -ENOBUFS;
- sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
+ sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
if (sk == NULL)
goto out;
@@ -2865,6 +2964,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
spin_lock_init(&po->bind_lock);
mutex_init(&po->pg_vec_lock);
+ po->rollover = NULL;
po->prot_hook.func = packet_rcv;
if (sock->type == SOCK_PACKET)
@@ -2942,6 +3042,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (skb == NULL)
goto out;
+ if (pkt_sk(sk)->pressure)
+ packet_rcv_has_room(pkt_sk(sk), NULL);
+
if (pkt_sk(sk)->has_vnet_hdr) {
struct virtio_net_hdr vnet_hdr = { 0 };
@@ -3485,6 +3588,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
struct packet_sock *po = pkt_sk(sk);
void *data = &val;
union tpacket_stats_u st;
+ struct tpacket_rollover_stats rstats;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
@@ -3560,6 +3664,15 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
((u32)po->fanout->flags << 24)) :
0);
break;
+ case PACKET_ROLLOVER_STATS:
+ if (!po->rollover)
+ return -EINVAL;
+ rstats.tp_all = atomic_long_read(&po->rollover->num);
+ rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+ rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+ data = &rstats;
+ lv = sizeof(rstats);
+ break;
case PACKET_TX_HAS_OFF:
val = po->tp_tx_has_off;
break;
@@ -3697,6 +3810,8 @@ static unsigned int packet_poll(struct file *file, struct socket *sock,
TP_STATUS_KERNEL))
mask |= POLLIN | POLLRDNORM;
}
+ if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
+ po->pressure = 0;
spin_unlock_bh(&sk->sk_receive_queue.lock);
spin_lock_bh(&sk->sk_write_queue.lock);
if (po->tx_ring.pg_vec) {
diff --git a/net/packet/internal.h b/net/packet/internal.h
index fe6e20caea1d..c035d263c1e8 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -82,12 +82,20 @@ struct packet_fanout {
atomic_t rr_cur;
struct list_head list;
struct sock *arr[PACKET_FANOUT_MAX];
- int next[PACKET_FANOUT_MAX];
spinlock_t lock;
atomic_t sk_ref;
struct packet_type prot_hook ____cacheline_aligned_in_smp;
};
+struct packet_rollover {
+ int sock;
+ atomic_long_t num;
+ atomic_long_t num_huge;
+ atomic_long_t num_failed;
+#define ROLLOVER_HLEN (L1_CACHE_BYTES / sizeof(u32))
+ u32 history[ROLLOVER_HLEN] ____cacheline_aligned;
+} ____cacheline_aligned_in_smp;
+
struct packet_sock {
/* struct sock has to be the first member of packet_sock */
struct sock sk;
@@ -102,8 +110,10 @@ struct packet_sock {
auxdata:1,
origdev:1,
has_vnet_hdr:1;
+ int pressure;
int ifindex; /* bound device */
__be16 num;
+ struct packet_rollover *rollover;
struct packet_mclist *mclist;
atomic_t mapped;
enum tpacket_versions tp_version;
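
Taken together, the af_packet.c and internal.h changes above add per-socket rollover accounting that user space can read back through the new PACKET_ROLLOVER_STATS getsockopt. A minimal user-space sketch, assuming the struct tpacket_rollover_stats layout (three 64-bit counters) exported by the linux/if_packet.h half of this series:

/* Hedged sketch: read rollover statistics from a PF_PACKET socket.
 * PACKET_ROLLOVER_STATS and struct tpacket_rollover_stats are assumed
 * to come from the uapi update accompanying these kernel changes.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int main(void)
{
	struct tpacket_rollover_stats rstats;
	socklen_t len = sizeof(rstats);
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0)
		return 1;
	/* Returns -EINVAL unless rollover is active on this socket,
	 * i.e. po->rollover was allocated in fanout_add() above.
	 */
	if (getsockopt(fd, SOL_PACKET, PACKET_ROLLOVER_STATS,
		       &rstats, &len) == 0)
		printf("all=%llu huge=%llu failed=%llu\n",
		       (unsigned long long)rstats.tp_all,
		       (unsigned long long)rstats.tp_huge,
		       (unsigned long long)rstats.tp_failed);
	return 0;
}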
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 32ab87d34828..10d42f3220ab 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -97,7 +97,7 @@ static int pn_socket_create(struct net *net, struct socket *sock, int protocol,
goto out;
}
- sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot);
+ sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot, kern);
if (sk == NULL) {
err = -ENOMEM;
goto out;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 6de2aeb98a1f..850a86cde0b3 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -845,7 +845,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
}
/* Create a new to-be-accepted sock */
- newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot);
+ newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, 0);
if (!newsk) {
pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
err = -ENOBUFS;
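
The sk_alloc() call sites above, and the many similar ones throughout this diff, reflect a signature change that threads the socket-creation kern flag down into the allocator. The new prototype as used here (sketch; include/net/sock.h in this series is authoritative):

/* kern is 1 for sockets created by the kernel itself, e.g. via
 * sock_create_kern(), and 0 for sockets created on behalf of user
 * space. Sketch of the signature only.
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern);

Accept paths such as pep_sock_accept() above and rose_make_new() below pass 0 explicitly, since the new socket belongs to user space regardless of which context allocates it.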
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 10443377fb9d..2ad9032372b2 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -270,6 +270,28 @@ static int rds_cong_monitor(struct rds_sock *rs, char __user *optval,
return ret;
}
+static int rds_set_transport(struct rds_sock *rs, char __user *optval,
+ int optlen)
+{
+ int t_type;
+
+ if (rs->rs_transport)
+ return -EOPNOTSUPP; /* previously attached to transport */
+
+ if (optlen != sizeof(int))
+ return -EINVAL;
+
+ if (copy_from_user(&t_type, (int __user *)optval, sizeof(t_type)))
+ return -EFAULT;
+
+ if (t_type < 0 || t_type >= RDS_TRANS_COUNT)
+ return -EINVAL;
+
+ rs->rs_transport = rds_trans_get(t_type);
+
+ return rs->rs_transport ? 0 : -ENOPROTOOPT;
+}
+
static int rds_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
@@ -300,6 +322,11 @@ static int rds_setsockopt(struct socket *sock, int level, int optname,
case RDS_CONG_MONITOR:
ret = rds_cong_monitor(rs, optval, optlen);
break;
+ case SO_RDS_TRANSPORT:
+ lock_sock(sock->sk);
+ ret = rds_set_transport(rs, optval, optlen);
+ release_sock(sock->sk);
+ break;
default:
ret = -ENOPROTOOPT;
}
@@ -312,6 +339,7 @@ static int rds_getsockopt(struct socket *sock, int level, int optname,
{
struct rds_sock *rs = rds_sk_to_rs(sock->sk);
int ret = -ENOPROTOOPT, len;
+ int trans;
if (level != SOL_RDS)
goto out;
@@ -337,6 +365,19 @@ static int rds_getsockopt(struct socket *sock, int level, int optname,
else
ret = 0;
break;
+ case SO_RDS_TRANSPORT:
+ if (len < sizeof(int)) {
+ ret = -EINVAL;
+ break;
+ }
+ trans = (rs->rs_transport ? rs->rs_transport->t_type :
+ RDS_TRANS_NONE); /* unbound */
+ if (put_user(trans, (int __user *)optval) ||
+ put_user(sizeof(int), optlen))
+ ret = -EFAULT;
+ else
+ ret = 0;
+ break;
default:
break;
}
@@ -440,7 +481,7 @@ static int rds_create(struct net *net, struct socket *sock, int protocol,
if (sock->type != SOCK_SEQPACKET || protocol)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, AF_RDS, GFP_ATOMIC, &rds_proto);
+ sk = sk_alloc(net, AF_RDS, GFP_ATOMIC, &rds_proto, kern);
if (!sk)
return -ENOMEM;
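
The new SO_RDS_TRANSPORT socket option above lets an application pin an RDS socket to one transport before bind(); rds_bind() below then skips transport selection for a pre-attached socket. A hedged user-space sketch, assuming SO_RDS_TRANSPORT and the RDS_TRANS_* values are exported via the linux/rds.h uapi update in this series:

/* Hedged sketch: force an RDS socket onto the TCP transport. */
#include <sys/socket.h>
#include <linux/rds.h>

static int rds_socket_on_tcp(void)
{
	int t = RDS_TRANS_TCP;
	int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);

	if (fd < 0)
		return -1;
	/* Must happen before bind(); a second attempt returns
	 * -EOPNOTSUPP per rds_set_transport() above.
	 */
	if (setsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT, &t, sizeof(t)) < 0)
		return -1;
	return fd;
}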
diff --git a/net/rds/bind.c b/net/rds/bind.c
index a2e6562da751..4ebd29c128b6 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -181,6 +181,10 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (ret)
goto out;
+ if (rs->rs_transport) { /* previously bound */
+ ret = 0;
+ goto out;
+ }
trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
if (!trans) {
ret = -EADDRNOTAVAIL;
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 0d41155a2258..a33fb4ad3535 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -408,11 +408,6 @@ struct rds_notifier {
* should try hard not to block.
*/
-#define RDS_TRANS_IB 0
-#define RDS_TRANS_IWARP 1
-#define RDS_TRANS_TCP 2
-#define RDS_TRANS_COUNT 3
-
struct rds_transport {
char t_name[TRANSNAMSIZ];
struct list_head t_item;
@@ -803,6 +798,7 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
+struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 7f2ac4fec367..8b4a6cd2c3a7 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -101,6 +101,27 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr)
return ret;
}
+struct rds_transport *rds_trans_get(int t_type)
+{
+ struct rds_transport *ret = NULL;
+ struct rds_transport *trans;
+ unsigned int i;
+
+ down_read(&rds_trans_sem);
+ for (i = 0; i < RDS_TRANS_COUNT; i++) {
+ trans = transports[i];
+
+ if (trans && trans->t_type == t_type &&
+ (!trans->t_owner || try_module_get(trans->t_owner))) {
+ ret = trans;
+ break;
+ }
+ }
+ up_read(&rds_trans_sem);
+
+ return ret;
+}
+
/*
* This returns the number of stats entries in the snapshot and only
* copies them using the iter if there is enough space for them. The
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index fa7cd792791c..f12149a29cb1 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -794,7 +794,8 @@ void rfkill_resume_polling(struct rfkill *rfkill)
}
EXPORT_SYMBOL(rfkill_resume_polling);
-static int rfkill_suspend(struct device *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int rfkill_suspend(struct device *dev)
{
struct rfkill *rfkill = to_rfkill(dev);
@@ -818,13 +819,18 @@ static int rfkill_resume(struct device *dev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
+#define RFKILL_PM_OPS (&rfkill_pm_ops)
+#else
+#define RFKILL_PM_OPS NULL
+#endif
+
static struct class rfkill_class = {
.name = "rfkill",
.dev_release = rfkill_release,
.dev_groups = rfkill_dev_groups,
.dev_uevent = rfkill_dev_uevent,
- .suspend = rfkill_suspend,
- .resume = rfkill_resume,
+ .pm = RFKILL_PM_OPS,
};
bool rfkill_blocked(struct rfkill *rfkill)
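
The rfkill change above converts the class from the legacy suspend/resume callbacks to dev_pm_ops, with SIMPLE_DEV_PM_OPS() generating the ops table only under CONFIG_PM_SLEEP. The same shape as a generic sketch (the example_* names are hypothetical):

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* Quiesce the device before system sleep. */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* Restore the device after system sleep. */
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
#define EXAMPLE_PM_OPS (&example_pm_ops)
#else
#define EXAMPLE_PM_OPS NULL
#endif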
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index d978f2f46ff3..d5d58d919552 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -112,21 +112,17 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
rfkill->clk = devm_clk_get(&pdev->dev, NULL);
- gpio = devm_gpiod_get(&pdev->dev, "reset");
- if (!IS_ERR(gpio)) {
- ret = gpiod_direction_output(gpio, 0);
- if (ret)
- return ret;
- rfkill->reset_gpio = gpio;
- }
+ gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
- gpio = devm_gpiod_get(&pdev->dev, "shutdown");
- if (!IS_ERR(gpio)) {
- ret = gpiod_direction_output(gpio, 0);
- if (ret)
- return ret;
- rfkill->shutdown_gpio = gpio;
- }
+ rfkill->reset_gpio = gpio;
+
+ gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ rfkill->shutdown_gpio = gpio;
/* Make sure at-least one of the GPIO is defined and that
* a name is specified for this instance
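
The rfkill-gpio conversion above replaces the get-then-configure pattern with devm_gpiod_get_optional(), which takes the initial direction and level as a flags argument and returns NULL, not an error, when the GPIO is simply absent. A minimal probe-path sketch of the same pattern for a hypothetical driver:

#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	/* Request the "reset" GPIO, driven low initially. NULL means
	 * no such GPIO is described for this device, which is fine;
	 * a real lookup or configuration failure comes back as an
	 * ERR_PTR and must be propagated.
	 */
	reset = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* gpiod_set_value() and friends accept a NULL desc as a no-op,
	 * so no further NULL checks are needed.
	 */
	return 0;
}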
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8ae603069a1a..36dbc2da3661 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -520,7 +520,7 @@ static int rose_create(struct net *net, struct socket *sock, int protocol,
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
- sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
+ sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
if (sk == NULL)
return -ENOMEM;
@@ -559,7 +559,7 @@ static struct sock *rose_make_new(struct sock *osk)
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
- sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
+ sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
if (sk == NULL)
return NULL;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 0095b9a0b779..25d60ed15284 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -632,7 +632,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
sock->ops = &rxrpc_rpc_ops;
sock->state = SS_UNCONNECTED;
- sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto);
+ sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
if (!sk)
return -ENOMEM;
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
index ca904ed5400a..78483b4602bf 100644
--- a/net/rxrpc/ar-local.c
+++ b/net/rxrpc/ar-local.c
@@ -73,8 +73,8 @@ static int rxrpc_create_local(struct rxrpc_local *local)
_enter("%p{%d}", local, local->srx.transport_type);
/* create a socket to represent the local endpoint */
- ret = sock_create_kern(PF_INET, local->srx.transport_type, IPPROTO_UDP,
- &local->socket);
+ ret = sock_create_kern(&init_net, PF_INET, local->srx.transport_type,
+ IPPROTO_UDP, &local->socket);
if (ret < 0) {
_leave(" = %d [socket]", ret);
return ret;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2274e723a3df..daa33432b716 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -312,6 +312,7 @@ config NET_SCH_PIE
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
+ select NET_INGRESS
---help---
Say Y here if you want to use classifiers for incoming packets.
If unsure, say Y.
@@ -477,6 +478,16 @@ config NET_CLS_BPF
To compile this code as a module, choose M here: the module will
be called cls_bpf.
+config NET_CLS_FLOWER
+ tristate "Flower classifier"
+ select NET_CLS
+ ---help---
+ If you say Y here, you will be able to classify packets based on
+ a configurable combination of packet keys and masks.
+
+ To compile this code as a module, choose M here: the module will
+ be called cls_flower.
+
config NET_EMATCH
bool "Extended Matches"
select NET_CLS
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 7ca7f4c1b8c2..690c1689e090 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_NET_CLS_BASIC) += cls_basic.o
obj-$(CONFIG_NET_CLS_FLOW) += cls_flow.o
obj-$(CONFIG_NET_CLS_CGROUP) += cls_cgroup.o
obj-$(CONFIG_NET_CLS_BPF) += cls_bpf.o
+obj-$(CONFIG_NET_CLS_FLOWER) += cls_flower.o
obj-$(CONFIG_NET_EMATCH) += ematch.o
obj-$(CONFIG_NET_EMATCH_CMP) += em_cmp.o
obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 3d43e4979f27..af427a3dbcba 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -392,11 +392,6 @@ int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
list_for_each_entry(a, actions, list) {
repeat:
ret = a->ops->act(skb, a, res);
- if (TC_MUNGED & skb->tc_verd) {
- /* copied already, allow trampling */
- skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
- skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
- }
if (ret == TC_ACT_REPEAT)
goto repeat; /* we need a ttl - JHS */
if (ret != TC_ACT_PIPE)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index dc6a2d324bd8..1d56903fd4c7 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -37,6 +37,7 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
{
struct tcf_bpf *prog = act->priv;
int action, filter_res;
+ bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
if (unlikely(!skb_mac_header_was_set(skb)))
return TC_ACT_UNSPEC;
@@ -48,7 +49,13 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
/* Needed here for accessing maps. */
rcu_read_lock();
- filter_res = BPF_PROG_RUN(prog->filter, skb);
+ if (at_ingress) {
+ __skb_push(skb, skb->mac_len);
+ filter_res = BPF_PROG_RUN(prog->filter, skb);
+ __skb_pull(skb, skb->mac_len);
+ } else {
+ filter_res = BPF_PROG_RUN(prog->filter, skb);
+ }
rcu_read_unlock();
/* A BPF program may overwrite the default action opcode.
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 3f63ceac8e01..a42a3b257226 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -151,7 +151,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
}
at = G_TC_AT(skb->tc_verd);
- skb2 = skb_act_clone(skb, GFP_ATOMIC, m->tcf_action);
+ skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2 == NULL)
goto out;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 59649d588d79..17e6d6669c7f 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -108,7 +108,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct tcf_pedit *p = a->priv;
- int i, munged = 0;
+ int i;
unsigned int off;
if (skb_unclone(skb, GFP_ATOMIC))
@@ -156,11 +156,8 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
*ptr = ((*ptr & tkey->mask) ^ tkey->val);
if (ptr == &_data)
skb_store_bits(skb, off + offset, ptr, 4);
- munged++;
}
- if (munged)
- skb->tc_verd = SET_TC_MUNGED(skb->tc_verd);
goto done;
} else
WARN(1, "pedit BUG: index %d\n", p->tcf_index);
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 91bd9c19471d..c79ecfd36e0f 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -64,6 +64,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
{
struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
struct cls_bpf_prog *prog;
+#ifdef CONFIG_NET_CLS_ACT
+ bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
+#else
+ bool at_ingress = false;
+#endif
int ret = -1;
if (unlikely(!skb_mac_header_was_set(skb)))
@@ -72,7 +77,16 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
/* Needed here for accessing maps. */
rcu_read_lock();
list_for_each_entry_rcu(prog, &head->plist, link) {
- int filter_res = BPF_PROG_RUN(prog->filter, skb);
+ int filter_res;
+
+ if (at_ingress) {
+ /* It is safe to push/pull even if skb_shared() */
+ __skb_push(skb, skb->mac_len);
+ filter_res = BPF_PROG_RUN(prog->filter, skb);
+ __skb_pull(skb, skb->mac_len);
+ } else {
+ filter_res = BPF_PROG_RUN(prog->filter, skb);
+ }
if (filter_res == 0)
continue;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index a620c4e288a5..76bc3a20ffdb 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -26,7 +26,7 @@
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
@@ -68,35 +68,41 @@ static inline u32 addr_fold(void *addr)
static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
- if (flow->src)
- return ntohl(flow->src);
+ __be32 src = flow_get_u32_src(flow);
+
+ if (src)
+ return ntohl(src);
+
return addr_fold(skb->sk);
}
static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
- if (flow->dst)
- return ntohl(flow->dst);
+ __be32 dst = flow_get_u32_dst(flow);
+
+ if (dst)
+ return ntohl(dst);
+
return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}
static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
{
- return flow->ip_proto;
+ return flow->basic.ip_proto;
}
static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
- if (flow->ports)
- return ntohs(flow->port16[0]);
+ if (flow->ports.ports)
+ return ntohs(flow->ports.src);
return addr_fold(skb->sk);
}
static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
- if (flow->ports)
- return ntohs(flow->port16[1]);
+ if (flow->ports.ports)
+ return ntohs(flow->ports.dst);
return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}
@@ -295,7 +301,7 @@ static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
keymask = f->keymask;
if (keymask & FLOW_KEYS_NEEDED)
- skb_flow_dissect(skb, &flow_keys);
+ skb_flow_dissect_flow_keys(skb, &flow_keys);
for (n = 0; n < f->nkeys; n++) {
key = ffs(keymask) - 1;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
new file mode 100644
index 000000000000..b92d3f49c23e
--- /dev/null
+++ b/net/sched/cls_flower.c
@@ -0,0 +1,691 @@
+/*
+ * net/sched/cls_flower.c Flower classifier
+ *
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/rhashtable.h>
+
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+
+#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
+#include <net/ip.h>
+#include <net/flow_dissector.h>
+
+struct fl_flow_key {
+ int indev_ifindex;
+ struct flow_dissector_key_control control;
+ struct flow_dissector_key_basic basic;
+ struct flow_dissector_key_eth_addrs eth;
+ struct flow_dissector_key_addrs ipaddrs;
+ union {
+ struct flow_dissector_key_ipv4_addrs ipv4;
+ struct flow_dissector_key_ipv6_addrs ipv6;
+ };
+ struct flow_dissector_key_ports tp;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct fl_flow_mask_range {
+ unsigned short int start;
+ unsigned short int end;
+};
+
+struct fl_flow_mask {
+ struct fl_flow_key key;
+ struct fl_flow_mask_range range;
+ struct rcu_head rcu;
+};
+
+struct cls_fl_head {
+ struct rhashtable ht;
+ struct fl_flow_mask mask;
+ struct flow_dissector dissector;
+ u32 hgen;
+ bool mask_assigned;
+ struct list_head filters;
+ struct rhashtable_params ht_params;
+ struct rcu_head rcu;
+};
+
+struct cls_fl_filter {
+ struct rhash_head ht_node;
+ struct fl_flow_key mkey;
+ struct tcf_exts exts;
+ struct tcf_result res;
+ struct fl_flow_key key;
+ struct list_head list;
+ u32 handle;
+ struct rcu_head rcu;
+};
+
+static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
+{
+ return mask->range.end - mask->range.start;
+}
+
+static void fl_mask_update_range(struct fl_flow_mask *mask)
+{
+ const u8 *bytes = (const u8 *) &mask->key;
+ size_t size = sizeof(mask->key);
+ size_t i, first = 0, last = size - 1;
+
+ for (i = 0; i < sizeof(mask->key); i++) {
+ if (bytes[i]) {
+ if (!first && i)
+ first = i;
+ last = i;
+ }
+ }
+ mask->range.start = rounddown(first, sizeof(long));
+ mask->range.end = roundup(last + 1, sizeof(long));
+}
+
+static void *fl_key_get_start(struct fl_flow_key *key,
+ const struct fl_flow_mask *mask)
+{
+ return (u8 *) key + mask->range.start;
+}
+
+static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
+ struct fl_flow_mask *mask)
+{
+ const long *lkey = fl_key_get_start(key, mask);
+ const long *lmask = fl_key_get_start(&mask->key, mask);
+ long *lmkey = fl_key_get_start(mkey, mask);
+ int i;
+
+ for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
+ *lmkey++ = *lkey++ & *lmask++;
+}
+
+static void fl_clear_masked_range(struct fl_flow_key *key,
+ struct fl_flow_mask *mask)
+{
+ memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
+}
+
+static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res)
+{
+ struct cls_fl_head *head = rcu_dereference_bh(tp->root);
+ struct cls_fl_filter *f;
+ struct fl_flow_key skb_key;
+ struct fl_flow_key skb_mkey;
+
+ fl_clear_masked_range(&skb_key, &head->mask);
+ skb_key.indev_ifindex = skb->skb_iif;
+ /* skb_flow_dissect() does not set n_proto in case of an unknown
+ * protocol, so set it here instead.
+ */
+ skb_key.basic.n_proto = skb->protocol;
+ skb_flow_dissect(skb, &head->dissector, &skb_key);
+
+ fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
+
+ f = rhashtable_lookup_fast(&head->ht,
+ fl_key_get_start(&skb_mkey, &head->mask),
+ head->ht_params);
+ if (f) {
+ *res = f->res;
+ return tcf_exts_exec(skb, &f->exts, res);
+ }
+ return -1;
+}
+
+static int fl_init(struct tcf_proto *tp)
+{
+ struct cls_fl_head *head;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ return -ENOBUFS;
+
+ INIT_LIST_HEAD_RCU(&head->filters);
+ rcu_assign_pointer(tp->root, head);
+
+ return 0;
+}
+
+static void fl_destroy_filter(struct rcu_head *head)
+{
+ struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
+
+ tcf_exts_destroy(&f->exts);
+ kfree(f);
+}
+
+static bool fl_destroy(struct tcf_proto *tp, bool force)
+{
+ struct cls_fl_head *head = rtnl_dereference(tp->root);
+ struct cls_fl_filter *f, *next;
+
+ if (!force && !list_empty(&head->filters))
+ return false;
+
+ list_for_each_entry_safe(f, next, &head->filters, list) {
+ list_del_rcu(&f->list);
+ call_rcu(&f->rcu, fl_destroy_filter);
+ }
+ RCU_INIT_POINTER(tp->root, NULL);
+ if (head->mask_assigned)
+ rhashtable_destroy(&head->ht);
+ kfree_rcu(head, rcu);
+ return true;
+}
+
+static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
+{
+ struct cls_fl_head *head = rtnl_dereference(tp->root);
+ struct cls_fl_filter *f;
+
+ list_for_each_entry(f, &head->filters, list)
+ if (f->handle == handle)
+ return (unsigned long) f;
+ return 0;
+}
+
+static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
+ [TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC },
+ [TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
+ [TCA_FLOWER_INDEV] = { .type = NLA_STRING,
+ .len = IFNAMSIZ },
+ [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
+ [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
+ [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
+ [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
+ [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
+};
+
+static void fl_set_key_val(struct nlattr **tb,
+ void *val, int val_type,
+ void *mask, int mask_type, int len)
+{
+ if (!tb[val_type])
+ return;
+ memcpy(val, nla_data(tb[val_type]), len);
+ if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
+ memset(mask, 0xff, len);
+ else
+ memcpy(mask, nla_data(tb[mask_type]), len);
+}
+
+static int fl_set_key(struct net *net, struct nlattr **tb,
+ struct fl_flow_key *key, struct fl_flow_key *mask)
+{
+#ifdef CONFIG_NET_CLS_IND
+ if (tb[TCA_FLOWER_INDEV]) {
+ int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
+ if (err < 0)
+ return err;
+ key->indev_ifindex = err;
+ mask->indev_ifindex = 0xffffffff;
+ }
+#endif
+
+ fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
+ mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
+ sizeof(key->eth.dst));
+ fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
+ mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
+ sizeof(key->eth.src));
+ fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
+ &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
+ sizeof(key->basic.n_proto));
+ if (key->basic.n_proto == htons(ETH_P_IP) ||
+ key->basic.n_proto == htons(ETH_P_IPV6)) {
+ fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
+ &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
+ sizeof(key->basic.ip_proto));
+ }
+ if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
+ &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
+ sizeof(key->ipv4.src));
+ fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
+ &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
+ sizeof(key->ipv4.dst));
+ } else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
+ &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
+ sizeof(key->ipv6.src));
+ fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
+ &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
+ sizeof(key->ipv6.dst));
+ }
+ if (key->basic.ip_proto == IPPROTO_TCP) {
+ fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
+ &mask->tp.src, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp.src));
+ fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
+ &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp.dst));
+ } else if (key->basic.ip_proto == IPPROTO_UDP) {
+ fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
+ &mask->tp.src, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp.src));
+ fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
+ &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp.dst));
+ }
+
+ return 0;
+}
+
+static bool fl_mask_eq(struct fl_flow_mask *mask1,
+ struct fl_flow_mask *mask2)
+{
+ const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
+ const long *lmask2 = fl_key_get_start(&mask2->key, mask2);
+
+ return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
+ !memcmp(lmask1, lmask2, fl_mask_range(mask1));
+}
+
+static const struct rhashtable_params fl_ht_params = {
+ .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
+ .head_offset = offsetof(struct cls_fl_filter, ht_node),
+ .automatic_shrinking = true,
+};
+
+static int fl_init_hashtable(struct cls_fl_head *head,
+ struct fl_flow_mask *mask)
+{
+ head->ht_params = fl_ht_params;
+ head->ht_params.key_len = fl_mask_range(mask);
+ head->ht_params.key_offset += mask->range.start;
+
+ return rhashtable_init(&head->ht, &head->ht_params);
+}
+
+#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
+#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
+#define FL_KEY_MEMBER_END_OFFSET(member) \
+ (FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))
+
+#define FL_KEY_IN_RANGE(mask, member) \
+ (FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end && \
+ FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)
+
+#define FL_KEY_SET(keys, cnt, id, member) \
+ do { \
+ keys[cnt].key_id = id; \
+ keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
+ cnt++; \
+ } while (0)
+
+#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member) \
+ do { \
+ if (FL_KEY_IN_RANGE(mask, member)) \
+ FL_KEY_SET(keys, cnt, id, member); \
+ } while (0)
+
+static void fl_init_dissector(struct cls_fl_head *head,
+ struct fl_flow_mask *mask)
+{
+ struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
+ size_t cnt = 0;
+
+ FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
+ FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
+ FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
+ FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
+ FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
+ FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+ FLOW_DISSECTOR_KEY_PORTS, tp);
+
+ skb_flow_dissector_init(&head->dissector, keys, cnt);
+}
+
+static int fl_check_assign_mask(struct cls_fl_head *head,
+ struct fl_flow_mask *mask)
+{
+ int err;
+
+ if (head->mask_assigned) {
+ if (!fl_mask_eq(&head->mask, mask))
+ return -EINVAL;
+ else
+ return 0;
+ }
+
+ /* Mask is not assigned yet. Assign it, and initialize the
+ * hashtable accordingly.
+ */
+ err = fl_init_hashtable(head, mask);
+ if (err)
+ return err;
+ memcpy(&head->mask, mask, sizeof(head->mask));
+ head->mask_assigned = true;
+
+ fl_init_dissector(head, mask);
+
+ return 0;
+}
+
+static int fl_set_parms(struct net *net, struct tcf_proto *tp,
+ struct cls_fl_filter *f, struct fl_flow_mask *mask,
+ unsigned long base, struct nlattr **tb,
+ struct nlattr *est, bool ovr)
+{
+ struct tcf_exts e;
+ int err;
+
+ tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
+ err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+ if (err < 0)
+ return err;
+
+ if (tb[TCA_FLOWER_CLASSID]) {
+ f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
+ tcf_bind_filter(tp, &f->res, base);
+ }
+
+ err = fl_set_key(net, tb, &f->key, &mask->key);
+ if (err)
+ goto errout;
+
+ fl_mask_update_range(mask);
+ fl_set_masked_key(&f->mkey, &f->key, mask);
+
+ tcf_exts_change(tp, &f->exts, &e);
+
+ return 0;
+errout:
+ tcf_exts_destroy(&e);
+ return err;
+}
+
+static u32 fl_grab_new_handle(struct tcf_proto *tp,
+ struct cls_fl_head *head)
+{
+ unsigned int i = 0x80000000;
+ u32 handle;
+
+ do {
+ if (++head->hgen == 0x7FFFFFFF)
+ head->hgen = 1;
+ } while (--i > 0 && fl_get(tp, head->hgen));
+
+ if (unlikely(i == 0)) {
+ pr_err("Insufficient number of handles\n");
+ handle = 0;
+ } else {
+ handle = head->hgen;
+ }
+
+ return handle;
+}
+
+static int fl_change(struct net *net, struct sk_buff *in_skb,
+ struct tcf_proto *tp, unsigned long base,
+ u32 handle, struct nlattr **tca,
+ unsigned long *arg, bool ovr)
+{
+ struct cls_fl_head *head = rtnl_dereference(tp->root);
+ struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
+ struct cls_fl_filter *fnew;
+ struct nlattr *tb[TCA_FLOWER_MAX + 1];
+ struct fl_flow_mask mask = {};
+ int err;
+
+ if (!tca[TCA_OPTIONS])
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
+ if (err < 0)
+ return err;
+
+ if (fold && handle && fold->handle != handle)
+ return -EINVAL;
+
+ fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
+ if (!fnew)
+ return -ENOBUFS;
+
+ tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
+
+ if (!handle) {
+ handle = fl_grab_new_handle(tp, head);
+ if (!handle) {
+ err = -EINVAL;
+ goto errout;
+ }
+ }
+ fnew->handle = handle;
+
+ err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
+ if (err)
+ goto errout;
+
+ err = fl_check_assign_mask(head, &mask);
+ if (err)
+ goto errout;
+
+ err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
+ head->ht_params);
+ if (err)
+ goto errout;
+ if (fold)
+ rhashtable_remove_fast(&head->ht, &fold->ht_node,
+ head->ht_params);
+
+ *arg = (unsigned long) fnew;
+
+ if (fold) {
+ list_replace_rcu(&fold->list, &fnew->list);
+ tcf_unbind_filter(tp, &fold->res);
+ call_rcu(&fold->rcu, fl_destroy_filter);
+ } else {
+ list_add_tail_rcu(&fnew->list, &head->filters);
+ }
+
+ return 0;
+
+errout:
+ kfree(fnew);
+ return err;
+}
+
+static int fl_delete(struct tcf_proto *tp, unsigned long arg)
+{
+ struct cls_fl_head *head = rtnl_dereference(tp->root);
+ struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
+
+ rhashtable_remove_fast(&head->ht, &f->ht_node,
+ head->ht_params);
+ list_del_rcu(&f->list);
+ tcf_unbind_filter(tp, &f->res);
+ call_rcu(&f->rcu, fl_destroy_filter);
+ return 0;
+}
+
+static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+{
+ struct cls_fl_head *head = rtnl_dereference(tp->root);
+ struct cls_fl_filter *f;
+
+ list_for_each_entry_rcu(f, &head->filters, list) {
+ if (arg->count < arg->skip)
+ goto skip;
+ if (arg->fn(tp, (unsigned long) f, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+skip:
+ arg->count++;
+ }
+}
+
+static int fl_dump_key_val(struct sk_buff *skb,
+ void *val, int val_type,
+ void *mask, int mask_type, int len)
+{
+ int err;
+
+ if (!memchr_inv(mask, 0, len))
+ return 0;
+ err = nla_put(skb, val_type, len, val);
+ if (err)
+ return err;
+ if (mask_type != TCA_FLOWER_UNSPEC) {
+ err = nla_put(skb, mask_type, len, mask);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
+ struct sk_buff *skb, struct tcmsg *t)
+{
+ struct cls_fl_head *head = rtnl_dereference(tp->root);
+ struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
+ struct nlattr *nest;
+ struct fl_flow_key *key, *mask;
+
+ if (!f)
+ return skb->len;
+
+ t->tcm_handle = f->handle;
+
+ nest = nla_nest_start(skb, TCA_OPTIONS);
+ if (!nest)
+ goto nla_put_failure;
+
+ if (f->res.classid &&
+ nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
+ goto nla_put_failure;
+
+ key = &f->key;
+ mask = &head->mask.key;
+
+ if (mask->indev_ifindex) {
+ struct net_device *dev;
+
+ dev = __dev_get_by_index(net, key->indev_ifindex);
+ if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
+ goto nla_put_failure;
+ }
+
+ if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
+ mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
+ sizeof(key->eth.dst)) ||
+ fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
+ mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
+ sizeof(key->eth.src)) ||
+ fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
+ &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
+ sizeof(key->basic.n_proto)))
+ goto nla_put_failure;
+ if ((key->basic.n_proto == htons(ETH_P_IP) ||
+ key->basic.n_proto == htons(ETH_P_IPV6)) &&
+ fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
+ &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
+ sizeof(key->basic.ip_proto)))
+ goto nla_put_failure;
+
+ if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
+ (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
+ &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
+ sizeof(key->ipv4.src)) ||
+ fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
+ &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
+ sizeof(key->ipv4.dst))))
+ goto nla_put_failure;
+ else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
+ (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
+ &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
+ sizeof(key->ipv6.src)) ||
+ fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
+ &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
+ sizeof(key->ipv6.dst))))
+ goto nla_put_failure;
+
+ if (key->basic.ip_proto == IPPROTO_TCP &&
+ (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
+ &mask->tp.src, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp.src)) ||
+ fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
+ &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp.dst))))
+ goto nla_put_failure;
+ else if (key->basic.ip_proto == IPPROTO_UDP &&
+ (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
+ &mask->tp.src, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp.src)) ||
+ fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
+ &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp.dst))))
+ goto nla_put_failure;
+
+ if (tcf_exts_dump(skb, &f->exts))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest);
+
+ if (tcf_exts_dump_stats(skb, &f->exts) < 0)
+ goto nla_put_failure;
+
+ return skb->len;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -1;
+}
+
+static struct tcf_proto_ops cls_fl_ops __read_mostly = {
+ .kind = "flower",
+ .classify = fl_classify,
+ .init = fl_init,
+ .destroy = fl_destroy,
+ .get = fl_get,
+ .change = fl_change,
+ .delete = fl_delete,
+ .walk = fl_walk,
+ .dump = fl_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __init cls_fl_init(void)
+{
+ return register_tcf_proto_ops(&cls_fl_ops);
+}
+
+static void __exit cls_fl_exit(void)
+{
+ unregister_tcf_proto_ops(&cls_fl_ops);
+}
+
+module_init(cls_fl_init);
+module_exit(cls_fl_exit);
+
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_DESCRIPTION("Flower classifier");
+MODULE_LICENSE("GPL v2");
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 1e1c89e51a11..c5b9db84d069 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1818,13 +1818,8 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
continue;
err = tp->classify(skb, tp, res);
- if (err >= 0) {
-#ifdef CONFIG_NET_CLS_ACT
- if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
- skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
-#endif
+ if (err >= 0)
return err;
- }
}
return -1;
}
@@ -1836,23 +1831,22 @@ int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
int err = 0;
#ifdef CONFIG_NET_CLS_ACT
const struct tcf_proto *otp = tp;
+ int limit = 0;
reclassify:
#endif
err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
if (err == TC_ACT_RECLASSIFY) {
- u32 verd = G_TC_VERD(skb->tc_verd);
tp = otp;
- if (verd++ >= MAX_REC_LOOP) {
+ if (unlikely(limit++ >= MAX_REC_LOOP)) {
net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
tp->q->ops->id,
tp->prio & 0xffff,
ntohs(tp->protocol));
return TC_ACT_SHOT;
}
- skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
goto reclassify;
}
#endif
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index c009eb9045ce..93d5742dc7e0 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -18,7 +18,7 @@
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
/*
CHOKe stateless AQM for fair bandwidth allocation
@@ -133,16 +133,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
--sch->q.qlen;
}
-/* private part of skb->cb[] that a qdisc is allowed to use
- * is limited to QDISC_CB_PRIV_LEN bytes.
- * As a flow key might be too large, we store a part of it only.
- */
-#define CHOKE_K_LEN min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3)
-
struct choke_skb_cb {
u16 classid;
u8 keys_valid;
- u8 keys[QDISC_CB_PRIV_LEN - 3];
+ struct flow_keys_digest keys;
};
static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -176,19 +170,19 @@ static bool choke_match_flow(struct sk_buff *skb1,
if (!choke_skb_cb(skb1)->keys_valid) {
choke_skb_cb(skb1)->keys_valid = 1;
- skb_flow_dissect(skb1, &temp);
- memcpy(&choke_skb_cb(skb1)->keys, &temp, CHOKE_K_LEN);
+ skb_flow_dissect_flow_keys(skb1, &temp);
+ make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
}
if (!choke_skb_cb(skb2)->keys_valid) {
choke_skb_cb(skb2)->keys_valid = 1;
- skb_flow_dissect(skb2, &temp);
- memcpy(&choke_skb_cb(skb2)->keys, &temp, CHOKE_K_LEN);
+ skb_flow_dissect_flow_keys(skb2, &temp);
+ make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
}
return !memcmp(&choke_skb_cb(skb1)->keys,
&choke_skb_cb(skb2)->keys,
- CHOKE_K_LEN);
+ sizeof(choke_skb_cb(skb1)->keys));
}
/*
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 7a0bdb16ac92..535007d5f0b5 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -6,7 +6,7 @@
*
* Implemented on linux by :
* Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
- * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -109,6 +109,7 @@ static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
[TCA_CODEL_LIMIT] = { .type = NLA_U32 },
[TCA_CODEL_INTERVAL] = { .type = NLA_U32 },
[TCA_CODEL_ECN] = { .type = NLA_U32 },
+ [TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
};
static int codel_change(struct Qdisc *sch, struct nlattr *opt)
@@ -133,6 +134,12 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
}
+ if (tb[TCA_CODEL_CE_THRESHOLD]) {
+ u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);
+
+ q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
if (tb[TCA_CODEL_INTERVAL]) {
u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
@@ -201,7 +208,10 @@ static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_CODEL_ECN,
q->params.ecn))
goto nla_put_failure;
-
+ if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
+ nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
+ codel_time_to_us(q->params.ce_threshold)))
+ goto nla_put_failure;
return nla_nest_end(skb, opts);
nla_put_failure:
@@ -220,6 +230,7 @@ static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
.ldelay = codel_time_to_us(q->vars.ldelay),
.dropping = q->vars.dropping,
.ecn_mark = q->stats.ecn_mark,
+ .ce_mark = q->stats.ce_mark,
};
if (q->vars.dropping) {
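
The codel change above adds a TCA_CODEL_CE_THRESHOLD attribute: a sojourn-time threshold, given in microseconds and converted with the same fixed-point shift as target and interval, above which packets are ECN CE-marked instead of waiting for the normal CoDel drop law. A hedged sketch of the dequeue-side check that the companion include/net/codel.h change is expected to perform:

/* Hedged sketch; the real logic lives in the include/net/codel.h
 * part of this series.
 */
static void codel_maybe_ce_mark(struct sk_buff *skb,
				const struct codel_params *params,
				struct codel_stats *stats,
				codel_time_t sojourn_time)
{
	if (params->ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    codel_time_after(sojourn_time, params->ce_threshold) &&
	    INET_ECN_set_ce(skb))
		stats->ce_mark++;
}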
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index c244c45b78d7..d75993f89fac 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -6,7 +6,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
*/
#include <linux/module.h>
@@ -23,7 +23,6 @@
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
-#include <net/flow_keys.h>
#include <net/codel.h>
/* Fair Queue CoDel.
@@ -68,15 +67,9 @@ struct fq_codel_sched_data {
};
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
- const struct sk_buff *skb)
+ struct sk_buff *skb)
{
- struct flow_keys keys;
- unsigned int hash;
-
- skb_flow_dissect(skb, &keys);
- hash = jhash_3words((__force u32)keys.dst,
- (__force u32)keys.src ^ keys.ip_proto,
- (__force u32)keys.ports, q->perturbation);
+ u32 hash = skb_get_hash_perturb(skb, q->perturbation);
return reciprocal_scale(hash, q->flows_cnt);
}
@@ -299,6 +292,7 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
[TCA_FQ_CODEL_ECN] = { .type = NLA_U32 },
[TCA_FQ_CODEL_FLOWS] = { .type = NLA_U32 },
[TCA_FQ_CODEL_QUANTUM] = { .type = NLA_U32 },
+ [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
@@ -329,6 +323,12 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
}
+ if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
+ u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);
+
+ q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
+ }
+
if (tb[TCA_FQ_CODEL_INTERVAL]) {
u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
@@ -448,6 +448,11 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
q->flows_cnt))
goto nla_put_failure;
+ if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
+ nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
+ codel_time_to_us(q->cparams.ce_threshold)))
+ goto nla_put_failure;
+
return nla_nest_end(skb, opts);
nla_put_failure:
@@ -466,6 +471,7 @@ static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
st.qdisc_stats.drop_overlimit = q->drop_overlimit;
st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
st.qdisc_stats.new_flow_count = q->new_flow_count;
+ st.qdisc_stats.ce_mark = q->cstats.ce_mark;
list_for_each(pos, &q->new_flows)
st.qdisc_stats.new_flows_len++;
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 634529e0ce6b..abb9f2fec28f 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -165,7 +165,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
* if no default DP has been configured. This
* allows for DP flows to be left untouched.
*/
- if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
+ if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
+ sch->limit))
return qdisc_enqueue_tail(skb, sch);
else
goto drop;
@@ -397,7 +398,10 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
q->DP = dp;
q->prio = prio;
- q->limit = ctl->limit;
+ if (ctl->limit > sch->limit)
+ q->limit = sch->limit;
+ else
+ q->limit = ctl->limit;
if (q->backlog == 0)
red_end_of_idle_period(&q->vars);
@@ -414,6 +418,7 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
[TCA_GRED_STAB] = { .len = 256 },
[TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) },
[TCA_GRED_MAX_P] = { .type = NLA_U32 },
+ [TCA_GRED_LIMIT] = { .type = NLA_U32 },
};
static int gred_change(struct Qdisc *sch, struct nlattr *opt)
@@ -433,11 +438,15 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
if (err < 0)
return err;
- if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
+ if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
+ if (tb[TCA_GRED_LIMIT] != NULL)
+ sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
return gred_change_table_def(sch, opt);
+ }
if (tb[TCA_GRED_PARMS] == NULL ||
- tb[TCA_GRED_STAB] == NULL)
+ tb[TCA_GRED_STAB] == NULL ||
+ tb[TCA_GRED_LIMIT] != NULL)
return -EINVAL;
max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
@@ -501,6 +510,14 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
return -EINVAL;
+ if (tb[TCA_GRED_LIMIT])
+ sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
+ else {
+ u32 qlen = qdisc_dev(sch)->tx_queue_len ? : 1;
+
+ sch->limit = qlen * psched_mtu(qdisc_dev(sch));
+ }
+
return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}
@@ -531,6 +548,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
+ goto nla_put_failure;
+
parms = nla_nest_start(skb, TCA_GRED_PARMS);
if (parms == NULL)
goto nla_put_failure;
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 15d3aabfe250..9d15cb6b8cb1 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
-#include <net/flow_keys.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
@@ -176,22 +175,6 @@ static u32 hhf_time_stamp(void)
return jiffies;
}
-static unsigned int skb_hash(const struct hhf_sched_data *q,
- const struct sk_buff *skb)
-{
- struct flow_keys keys;
- unsigned int hash;
-
- if (skb->sk && skb->sk->sk_hash)
- return skb->sk->sk_hash;
-
- skb_flow_dissect(skb, &keys);
- hash = jhash_3words((__force u32)keys.dst,
- (__force u32)keys.src ^ keys.ip_proto,
- (__force u32)keys.ports, q->perturbation);
- return hash;
-}
-
/* Looks up a heavy-hitter flow in a chaining list of table T. */
static struct hh_flow_state *seek_list(const u32 hash,
struct list_head *head,
@@ -280,7 +263,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
}
/* Get hashed flow-id of the skb. */
- hash = skb_hash(q, skb);
+ hash = skb_get_hash_perturb(skb, q->perturbation);
/* Check if this packet belongs to an already established HH flow. */
flow_pos = hash & HHF_BIT_MASK;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 4cdbfb85686a..e7c648fa9dc3 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -12,16 +12,10 @@
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
+
#include <net/netlink.h>
#include <net/pkt_sched.h>
-
-struct ingress_qdisc_data {
- struct tcf_proto __rcu *filter_list;
-};
-
-/* ------------------------- Class/flow operations ------------------------- */
-
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
return NULL;
@@ -49,57 +43,24 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch,
unsigned long cl)
{
- struct ingress_qdisc_data *p = qdisc_priv(sch);
-
- return &p->filter_list;
-}
-
-/* --------------------------- Qdisc operations ---------------------------- */
+ struct net_device *dev = qdisc_dev(sch);
-static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
-{
- struct ingress_qdisc_data *p = qdisc_priv(sch);
- struct tcf_result res;
- struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
- int result;
-
- result = tc_classify(skb, fl, &res);
-
- qdisc_bstats_update(sch, skb);
- switch (result) {
- case TC_ACT_SHOT:
- result = TC_ACT_SHOT;
- qdisc_qstats_drop(sch);
- break;
- case TC_ACT_STOLEN:
- case TC_ACT_QUEUED:
- result = TC_ACT_STOLEN;
- break;
- case TC_ACT_RECLASSIFY:
- case TC_ACT_OK:
- skb->tc_index = TC_H_MIN(res.classid);
- default:
- result = TC_ACT_OK;
- break;
- }
-
- return result;
+ return &dev->ingress_cl_list;
}
-/* ------------------------------------------------------------- */
-
static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
{
net_inc_ingress_queue();
+ sch->flags |= TCQ_F_CPUSTATS;
return 0;
}
static void ingress_destroy(struct Qdisc *sch)
{
- struct ingress_qdisc_data *p = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
- tcf_destroy_chain(&p->filter_list);
+ tcf_destroy_chain(&dev->ingress_cl_list);
net_dec_ingress_queue();
}
@@ -110,6 +71,7 @@ static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
+
return nla_nest_end(skb, nest);
nla_put_failure:
@@ -130,8 +92,6 @@ static const struct Qdisc_class_ops ingress_class_ops = {
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.cl_ops = &ingress_class_ops,
.id = "ingress",
- .priv_size = sizeof(struct ingress_qdisc_data),
- .enqueue = ingress_enqueue,
.init = ingress_init,
.destroy = ingress_destroy,
.dump = ingress_dump,
@@ -148,6 +108,7 @@ static void __exit ingress_module_exit(void)
unregister_qdisc(&ingress_qdisc_ops);
}
-module_init(ingress_module_init)
-module_exit(ingress_module_exit)
+module_init(ingress_module_init);
+module_exit(ingress_module_exit);
+
MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 956ead2cab9a..5abd1d9de989 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -440,9 +440,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
struct Qdisc *rootq = qdisc_root(sch);
u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
- q->duplicate = 0;
- qdisc_enqueue_root(skb2, rootq);
+ q->duplicate = 0;
+ rootq->enqueue(skb2, rootq);
q->duplicate = dupsave;
}
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 5819dd82630d..4b815193326c 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -26,7 +26,6 @@
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
-#include <net/flow_keys.h>
/*
* SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
@@ -285,9 +284,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
int i;
u32 p_min = ~0;
u32 minqlen = ~0;
- u32 r, slot, salt, sfbhash;
+ u32 r, sfbhash;
+ u32 slot = q->slot;
int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- struct flow_keys keys;
if (unlikely(sch->q.qlen >= q->limit)) {
qdisc_qstats_overlimit(sch);
@@ -309,22 +308,17 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
fl = rcu_dereference_bh(q->filter_list);
if (fl) {
+ u32 salt;
+
/* If using external classifiers, get result and record it. */
if (!sfb_classify(skb, fl, &ret, &salt))
goto other_drop;
- keys.src = salt;
- keys.dst = 0;
- keys.ports = 0;
+ sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
} else {
- skb_flow_dissect(skb, &keys);
+ sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
}
- slot = q->slot;
- sfbhash = jhash_3words((__force u32)keys.dst,
- (__force u32)keys.src,
- (__force u32)keys.ports,
- q->bins[slot].perturbation);
if (!sfbhash)
sfbhash = 1;
sfb_skb_cb(skb)->hashes[slot] = sfbhash;
@@ -356,10 +350,8 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (unlikely(p_min >= SFB_MAX_PROB)) {
/* Inelastic flow */
if (q->double_buffering) {
- sfbhash = jhash_3words((__force u32)keys.dst,
- (__force u32)keys.src,
- (__force u32)keys.ports,
- q->bins[slot].perturbation);
+ sfbhash = skb_get_hash_perturb(skb,
+ q->bins[slot].perturbation);
if (!sfbhash)
sfbhash = 1;
sfb_skb_cb(skb)->hashes[slot] = sfbhash;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index b877140beda5..7d1492663360 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -23,7 +23,6 @@
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
-#include <net/flow_keys.h>
#include <net/red.h>
@@ -156,30 +155,10 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
return &q->dep[val - SFQ_MAX_FLOWS];
}
-/*
- * In order to be able to quickly rehash our queue when timer changes
- * q->perturbation, we store flow_keys in skb->cb[]
- */
-struct sfq_skb_cb {
- struct flow_keys keys;
-};
-
-static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
-{
- qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
- return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
-}
-
static unsigned int sfq_hash(const struct sfq_sched_data *q,
const struct sk_buff *skb)
{
- const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
- unsigned int hash;
-
- hash = jhash_3words((__force u32)keys->dst,
- (__force u32)keys->src ^ keys->ip_proto,
- (__force u32)keys->ports, q->perturbation);
- return hash & (q->divisor - 1);
+ return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
}
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -196,10 +175,8 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
return TC_H_MIN(skb->priority);
fl = rcu_dereference_bh(q->filter_list);
- if (!fl) {
- skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
+ if (!fl)
return sfq_hash(q, skb) + 1;
- }
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
result = tc_classify(skb, fl, &res);
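
Both SFB and SFQ now derive their bucket the same way, so the shared
pattern can be read as one helper (a sketch, not part of this patch;
'perturbation' is the per-qdisc random seed):

    static u32 flow_bucket(const struct sk_buff *skb, u32 perturbation,
                           u32 divisor)
    {
            /* skb_get_hash_perturb() computes (or reuses) the flow hash
             * and mixes in the seed, replacing the open-coded
             * skb_flow_dissect() + jhash_3words() combination.
             */
            return skb_get_hash_perturb(skb, perturbation) & (divisor - 1);
    }
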
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0e4198ee2370..e917d27328ea 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -331,8 +331,9 @@ out:
rt = (struct rt6_info *)dst;
t->dst = dst;
- t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
- pr_debug("rt6_dst:%pI6 rt6_src:%pI6\n", &rt->rt6i_dst.addr,
+ t->dst_cookie = rt6_get_cookie(rt);
+ pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
+ &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
&fl6->saddr);
} else {
t->dst = NULL;
@@ -635,7 +636,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
struct sctp6_sock *newsctp6sk;
- newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot);
+ newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);
if (!newsk)
goto out;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 53b7acde9aa3..59e80356672b 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -550,7 +550,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
struct sctp_association *asoc)
{
struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
- sk->sk_prot);
+ sk->sk_prot, 0);
struct inet_sock *newinet;
if (!newsk)
diff --git a/net/socket.c b/net/socket.c
index 884e32997698..9963a0b53a64 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -576,9 +576,6 @@ void sock_release(struct socket *sock)
if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
pr_err("%s: fasync list not empty!\n", __func__);
- if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
- return;
-
this_cpu_sub(sockets_in_use, 1);
if (!sock->file) {
iput(SOCK_INODE(sock));
@@ -1213,9 +1210,9 @@ int sock_create(int family, int type, int protocol, struct socket **res)
}
EXPORT_SYMBOL(sock_create);
-int sock_create_kern(int family, int type, int protocol, struct socket **res)
+int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res)
{
- return __sock_create(&init_net, family, type, protocol, res, 1);
+ return __sock_create(net, family, type, protocol, res, 1);
}
EXPORT_SYMBOL(sock_create_kern);
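
A usage sketch of the new signature, assuming the caller holds a reference
on the target namespace (the helper is hypothetical): kernel sockets are
now created in the caller's netns instead of being pinned to init_net:

    static int make_kernel_udp_socket(struct net *net, struct socket **sockp)
    {
            /* created in 'net', not unconditionally in init_net */
            return sock_create_kern(net, PF_INET, SOCK_DGRAM,
                                    IPPROTO_UDP, sockp);
    }
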
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 055453d48668..658bc3ac8008 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -15,97 +15,361 @@
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
#include <net/ip_fib.h>
#include <net/switchdev.h>
/**
- * netdev_switch_parent_id_get - Get ID of a switch
+ * switchdev_port_attr_get - Get port attribute
+ *
* @dev: port device
- * @psid: switch ID
+ * @attr: attribute to get
+ */
+int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+ const struct switchdev_ops *ops = dev->switchdev_ops;
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ struct switchdev_attr first = {
+ .id = SWITCHDEV_ATTR_UNDEFINED
+ };
+ int err = -EOPNOTSUPP;
+
+ if (ops && ops->switchdev_port_attr_get)
+ return ops->switchdev_port_attr_get(dev, attr);
+
+ if (attr->flags & SWITCHDEV_F_NO_RECURSE)
+ return err;
+
+ /* Switch device port(s) may be stacked under
+ * bond/team/vlan dev, so recurse down to get attr on
+ * each port. Return -ENODATA if attr values don't
+ * compare across ports.
+ */
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = switchdev_port_attr_get(lower_dev, attr);
+ if (err)
+ break;
+ if (first.id == SWITCHDEV_ATTR_UNDEFINED)
+ first = *attr;
+ else if (memcmp(&first, attr, sizeof(*attr)))
+ return -ENODATA;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
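
On the driver side, a port driver answers the parent-ID query so that
ports of one switch compare equal when the recursion above walks a stacked
device (a sketch; 'foo_port' and its switch_id field are hypothetical, the
attr layout is the one used in this patch):

    static int foo_port_attr_get(struct net_device *dev,
                                 struct switchdev_attr *attr)
    {
            struct foo_port *port = netdev_priv(dev);

            switch (attr->id) {
            case SWITCHDEV_ATTR_PORT_PARENT_ID:
                    attr->u.ppid.id_len = sizeof(port->switch_id);
                    memcpy(attr->u.ppid.id, &port->switch_id,
                           attr->u.ppid.id_len);
                    return 0;
            default:
                    return -EOPNOTSUPP;
            }
    }
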
+
+static int __switchdev_port_attr_set(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ const struct switchdev_ops *ops = dev->switchdev_ops;
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int err = -EOPNOTSUPP;
+
+ if (ops && ops->switchdev_port_attr_set)
+ return ops->switchdev_port_attr_set(dev, attr);
+
+ if (attr->flags & SWITCHDEV_F_NO_RECURSE)
+ return err;
+
+ /* Switch device port(s) may be stacked under
+ * bond/team/vlan dev, so recurse down to set attr on
+ * each port.
+ */
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = __switchdev_port_attr_set(lower_dev, attr);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+struct switchdev_attr_set_work {
+ struct work_struct work;
+ struct net_device *dev;
+ struct switchdev_attr attr;
+};
+
+static void switchdev_port_attr_set_work(struct work_struct *work)
+{
+ struct switchdev_attr_set_work *asw =
+ container_of(work, struct switchdev_attr_set_work, work);
+ int err;
+
+ rtnl_lock();
+ err = switchdev_port_attr_set(asw->dev, &asw->attr);
+ if (err && err != -EOPNOTSUPP)
+ netdev_err(asw->dev, "failed (err=%d) to set attribute (id=%d)\n",
+ err, asw->attr.id);
+ rtnl_unlock();
+
+ dev_put(asw->dev);
+ kfree(work);
+}
+
+static int switchdev_port_attr_set_defer(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct switchdev_attr_set_work *asw;
+
+ asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
+ if (!asw)
+ return -ENOMEM;
+
+ INIT_WORK(&asw->work, switchdev_port_attr_set_work);
+
+ dev_hold(dev);
+ asw->dev = dev;
+ memcpy(&asw->attr, attr, sizeof(asw->attr));
+
+ schedule_work(&asw->work);
+
+ return 0;
+}
+
+/**
+ * switchdev_port_attr_set - Set port attribute
*
- * Get ID of a switch this port is part of.
+ * @dev: port device
+ * @attr: attribute to set
+ *
+ * Use a 2-phase prepare-commit transaction model to ensure
+ * system is not left in a partially updated state due to
+ * failure from driver/device.
*/
-int netdev_switch_parent_id_get(struct net_device *dev,
- struct netdev_phys_item_id *psid)
+int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
+{
+ int err;
+
+ if (!rtnl_is_locked()) {
+ /* Running prepare-commit transaction across stacked
+ * devices requires that nothing moves, so if rtnl_lock is
+ * not held, schedule a worker thread to hold rtnl_lock
+ * while setting attr.
+ */
+
+ return switchdev_port_attr_set_defer(dev, attr);
+ }
+
+ /* Phase I: prepare for attr set. Driver/device should fail
+ * here if there are going to be issues in the commit phase,
+ * such as lack of resources or support. The driver/device
+ * should reserve resources needed for the commit phase here,
+ * but should not commit the attr.
+ */
+
+ attr->trans = SWITCHDEV_TRANS_PREPARE;
+ err = __switchdev_port_attr_set(dev, attr);
+ if (err) {
+ /* Prepare phase failed: abort the transaction. Any
+ * resources reserved in the prepare phase are
+ * released.
+ */
+
+ attr->trans = SWITCHDEV_TRANS_ABORT;
+ __switchdev_port_attr_set(dev, attr);
+
+ return err;
+ }
+
+ /* Phase II: commit attr set. This cannot fail as a fault
+ * of driver/device. If it does, it's a bug in the driver/device
+ * because the driver said everything was OK in phase I.
+ */
+
+ attr->trans = SWITCHDEV_TRANS_COMMIT;
+ err = __switchdev_port_attr_set(dev, attr);
+ BUG_ON(err);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
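
The driver-side contract implied by the phases: reserve in PREPARE,
release in ABORT, apply in COMMIT, and never fail the COMMIT. A sketch
(the 'foo_*' helpers are hypothetical):

    static int foo_port_attr_set(struct net_device *dev,
                                 struct switchdev_attr *attr)
    {
            struct foo_port *port = netdev_priv(dev);

            switch (attr->trans) {
            case SWITCHDEV_TRANS_PREPARE:
                    /* fail here, not in commit */
                    return foo_reserve_resources(port, attr);
            case SWITCHDEV_TRANS_ABORT:
                    foo_release_resources(port, attr);
                    return 0;
            case SWITCHDEV_TRANS_COMMIT:
                    foo_apply_attr(port, attr); /* must not fail */
                    return 0;
            default:
                    return -EOPNOTSUPP;
            }
    }
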
+
+static int __switchdev_port_obj_add(struct net_device *dev,
+ struct switchdev_obj *obj)
{
- const struct swdev_ops *ops = dev->swdev_ops;
+ const struct switchdev_ops *ops = dev->switchdev_ops;
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int err = -EOPNOTSUPP;
+
+ if (ops && ops->switchdev_port_obj_add)
+ return ops->switchdev_port_obj_add(dev, obj);
+
+ /* Switch device port(s) may be stacked under
+ * bond/team/vlan dev, so recurse down to add object on
+ * each port.
+ */
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = __switchdev_port_obj_add(lower_dev, obj);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * switchdev_port_obj_add - Add port object
+ *
+ * @dev: port device
+ * @obj: object to add
+ *
+ * Use a 2-phase prepare-commit transaction model to ensure
+ * system is not left in a partially updated state due to
+ * failure from driver/device.
+ *
+ * rtnl_lock must be held.
+ */
+int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
+{
+ int err;
+
+ ASSERT_RTNL();
- if (!ops || !ops->swdev_parent_id_get)
- return -EOPNOTSUPP;
- return ops->swdev_parent_id_get(dev, psid);
+ /* Phase I: prepare for obj add. Driver/device should fail
+ * here if there are going to be issues in the commit phase,
+ * such as lack of resources or support. The driver/device
+ * should reserve resources needed for the commit phase here,
+ * but should not commit the obj.
+ */
+
+ obj->trans = SWITCHDEV_TRANS_PREPARE;
+ err = __switchdev_port_obj_add(dev, obj);
+ if (err) {
+ /* Prepare phase failed: abort the transaction. Any
+ * resources reserved in the prepare phase are
+ * released.
+ */
+
+ obj->trans = SWITCHDEV_TRANS_ABORT;
+ __switchdev_port_obj_add(dev, obj);
+
+ return err;
+ }
+
+ /* Phase II: commit obj add. This cannot fail as a fault
+ * of driver/device. If it does, it's a bug in the driver/device
+ * because the driver said everything was OK in phase I.
+ */
+
+ obj->trans = SWITCHDEV_TRANS_COMMIT;
+ err = __switchdev_port_obj_add(dev, obj);
+ WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
+
+ return err;
}
-EXPORT_SYMBOL_GPL(netdev_switch_parent_id_get);
+EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
/**
- * netdev_switch_port_stp_update - Notify switch device port of STP
- * state change
+ * switchdev_port_obj_del - Delete port object
+ *
* @dev: port device
- * @state: port STP state
+ * @obj: object to delete
+ */
+int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
+{
+ const struct switchdev_ops *ops = dev->switchdev_ops;
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int err = -EOPNOTSUPP;
+
+ if (ops && ops->switchdev_port_obj_del)
+ return ops->switchdev_port_obj_del(dev, obj);
+
+ /* Switch device port(s) may be stacked under
+ * bond/team/vlan dev, so recurse down to delete object on
+ * each port.
+ */
+
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = switchdev_port_obj_del(lower_dev, obj);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
+
+/**
+ * switchdev_port_obj_dump - Dump port objects
*
- * Notify switch device port of bridge port STP state change.
+ * @dev: port device
+ * @obj: object to dump
*/
-int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj)
{
- const struct swdev_ops *ops = dev->swdev_ops;
+ const struct switchdev_ops *ops = dev->switchdev_ops;
struct net_device *lower_dev;
struct list_head *iter;
int err = -EOPNOTSUPP;
- if (ops && ops->swdev_port_stp_update)
- return ops->swdev_port_stp_update(dev, state);
+ if (ops && ops->switchdev_port_obj_dump)
+ return ops->switchdev_port_obj_dump(dev, obj);
+
+ /* Switch device port(s) may be stacked under
+ * bond/team/vlan dev, so recurse down to dump objects on
+ * first port at bottom of stack.
+ */
netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = netdev_switch_port_stp_update(lower_dev, state);
- if (err && err != -EOPNOTSUPP)
- return err;
+ err = switchdev_port_obj_dump(lower_dev, obj);
+ break;
}
return err;
}
-EXPORT_SYMBOL_GPL(netdev_switch_port_stp_update);
+EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
-static DEFINE_MUTEX(netdev_switch_mutex);
-static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain);
+static DEFINE_MUTEX(switchdev_mutex);
+static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
/**
- * register_netdev_switch_notifier - Register notifier
+ * register_switchdev_notifier - Register notifier
* @nb: notifier_block
*
* Register switch device notifier. This should be used by code
* which needs to monitor events happening in particular device.
* Return values are same as for atomic_notifier_chain_register().
*/
-int register_netdev_switch_notifier(struct notifier_block *nb)
+int register_switchdev_notifier(struct notifier_block *nb)
{
int err;
- mutex_lock(&netdev_switch_mutex);
- err = raw_notifier_chain_register(&netdev_switch_notif_chain, nb);
- mutex_unlock(&netdev_switch_mutex);
+ mutex_lock(&switchdev_mutex);
+ err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
+ mutex_unlock(&switchdev_mutex);
return err;
}
-EXPORT_SYMBOL_GPL(register_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(register_switchdev_notifier);
/**
- * unregister_netdev_switch_notifier - Unregister notifier
+ * unregister_switchdev_notifier - Unregister notifier
* @nb: notifier_block
*
* Unregister switch device notifier.
* Return values are same as for atomic_notifier_chain_unregister().
*/
-int unregister_netdev_switch_notifier(struct notifier_block *nb)
+int unregister_switchdev_notifier(struct notifier_block *nb)
{
int err;
- mutex_lock(&netdev_switch_mutex);
- err = raw_notifier_chain_unregister(&netdev_switch_notif_chain, nb);
- mutex_unlock(&netdev_switch_mutex);
+ mutex_lock(&switchdev_mutex);
+ err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
+ mutex_unlock(&switchdev_mutex);
return err;
}
-EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
/**
- * call_netdev_switch_notifiers - Call notifiers
+ * call_switchdev_notifiers - Call notifiers
* @val: value passed unmodified to notifier function
* @dev: port device
* @info: notifier information data
@@ -114,146 +378,387 @@ EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
* when it needs to propagate hardware event.
* Return values are same as for atomic_notifier_call_chain().
*/
-int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
- struct netdev_switch_notifier_info *info)
+int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+ struct switchdev_notifier_info *info)
{
int err;
info->dev = dev;
- mutex_lock(&netdev_switch_mutex);
- err = raw_notifier_call_chain(&netdev_switch_notif_chain, val, info);
- mutex_unlock(&netdev_switch_mutex);
+ mutex_lock(&switchdev_mutex);
+ err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
+ mutex_unlock(&switchdev_mutex);
return err;
}
-EXPORT_SYMBOL_GPL(call_netdev_switch_notifiers);
+EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
/**
- * netdev_switch_port_bridge_setlink - Notify switch device port of bridge
- * port attributes
+ * switchdev_port_bridge_getlink - Get bridge port attributes
*
* @dev: port device
- * @nlh: netlink msg with bridge port attributes
- * @flags: bridge setlink flags
*
- * Notify switch device port of bridge port attributes
+ * Called for SELF on rtnl_bridge_getlink to get bridge port
+ * attributes.
*/
-int netdev_switch_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags)
+int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
{
- const struct net_device_ops *ops = dev->netdev_ops;
+ struct switchdev_attr attr = {
+ .id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+ };
+ u16 mode = BRIDGE_MODE_UNDEF;
+ u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+ int err;
- if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
- return 0;
+ err = switchdev_port_attr_get(dev, &attr);
+ if (err)
+ return err;
- if (!ops->ndo_bridge_setlink)
- return -EOPNOTSUPP;
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
+ attr.u.brport_flags, mask, nlflags);
+}
+EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);
- return ops->ndo_bridge_setlink(dev, nlh, flags);
+static int switchdev_port_br_setflag(struct net_device *dev,
+ struct nlattr *nlattr,
+ unsigned long brport_flag)
+{
+ struct switchdev_attr attr = {
+ .id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+ };
+ u8 flag = nla_get_u8(nlattr);
+ int err;
+
+ err = switchdev_port_attr_get(dev, &attr);
+ if (err)
+ return err;
+
+ if (flag)
+ attr.u.brport_flags |= brport_flag;
+ else
+ attr.u.brport_flags &= ~brport_flag;
+
+ return switchdev_port_attr_set(dev, &attr);
+}
+
+static const struct nla_policy
+switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
+ [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
+ [IFLA_BRPORT_COST] = { .type = NLA_U32 },
+ [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
+ [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
+ [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
+ [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
+ [IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
+ [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
+ [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 },
+ [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
+};
+
+static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
+ struct nlattr *protinfo)
+{
+ struct nlattr *attr;
+ int rem;
+ int err;
+
+ err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
+ switchdev_port_bridge_policy);
+ if (err)
+ return err;
+
+ nla_for_each_nested(attr, protinfo, rem) {
+ switch (nla_type(attr)) {
+ case IFLA_BRPORT_LEARNING:
+ err = switchdev_port_br_setflag(dev, attr,
+ BR_LEARNING);
+ break;
+ case IFLA_BRPORT_LEARNING_SYNC:
+ err = switchdev_port_br_setflag(dev, attr,
+ BR_LEARNING_SYNC);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int switchdev_port_br_afspec(struct net_device *dev,
+ struct nlattr *afspec,
+ int (*f)(struct net_device *dev,
+ struct switchdev_obj *obj))
+{
+ struct nlattr *attr;
+ struct bridge_vlan_info *vinfo;
+ struct switchdev_obj obj = {
+ .id = SWITCHDEV_OBJ_PORT_VLAN,
+ };
+ struct switchdev_obj_vlan *vlan = &obj.u.vlan;
+ int rem;
+ int err;
+
+ nla_for_each_nested(attr, afspec, rem) {
+ if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
+ continue;
+ if (nla_len(attr) != sizeof(struct bridge_vlan_info))
+ return -EINVAL;
+ vinfo = nla_data(attr);
+ vlan->flags = vinfo->flags;
+ if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
+ if (vlan->vid_start)
+ return -EINVAL;
+ vlan->vid_start = vinfo->vid;
+ } else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
+ if (!vlan->vid_start)
+ return -EINVAL;
+ vlan->vid_end = vinfo->vid;
+ if (vlan->vid_end <= vlan->vid_start)
+ return -EINVAL;
+ err = f(dev, &obj);
+ if (err)
+ return err;
+ memset(vlan, 0, sizeof(*vlan));
+ } else {
+ if (vlan->vid_start)
+ return -EINVAL;
+ vlan->vid_start = vinfo->vid;
+ vlan->vid_end = vinfo->vid;
+ err = f(dev, &obj);
+ if (err)
+ return err;
+ memset(vlan, 0, sizeof(*vlan));
+ }
+ }
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_setlink);
/**
- * netdev_switch_port_bridge_dellink - Notify switch device port of bridge
- * port attribute delete
+ * switchdev_port_bridge_setlink - Set bridge port attributes
*
* @dev: port device
- * @nlh: netlink msg with bridge port attributes
- * @flags: bridge setlink flags
+ * @nlh: netlink header
+ * @flags: netlink flags
*
- * Notify switch device port of bridge port attribute delete
+ * Called for SELF on rtnl_bridge_setlink to set bridge port
+ * attributes.
*/
-int netdev_switch_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags)
+int switchdev_port_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags)
{
- const struct net_device_ops *ops = dev->netdev_ops;
+ struct nlattr *protinfo;
+ struct nlattr *afspec;
+ int err = 0;
- if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
- return 0;
+ protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+ IFLA_PROTINFO);
+ if (protinfo) {
+ err = switchdev_port_br_setlink_protinfo(dev, protinfo);
+ if (err)
+ return err;
+ }
- if (!ops->ndo_bridge_dellink)
- return -EOPNOTSUPP;
+ afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+ IFLA_AF_SPEC);
+ if (afspec)
+ err = switchdev_port_br_afspec(dev, afspec,
+ switchdev_port_obj_add);
- return ops->ndo_bridge_dellink(dev, nlh, flags);
+ return err;
}
-EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_dellink);
+EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
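
For reference, the IFLA_AF_SPEC encoding the parser above expects: a VLAN
range arrives as a BEGIN/END pair of IFLA_BRIDGE_VLAN_INFO attributes
(values illustrative), which switchdev_port_br_afspec() folds into a
single SWITCHDEV_OBJ_PORT_VLAN object:

    struct bridge_vlan_info begin = {
            .flags = BRIDGE_VLAN_INFO_RANGE_BEGIN,
            .vid   = 100,
    };
    struct bridge_vlan_info end = {
            .flags = BRIDGE_VLAN_INFO_RANGE_END,
            .vid   = 110,
    };
    /* -> one object with vid_start = 100, vid_end = 110 */
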
/**
- * ndo_dflt_netdev_switch_port_bridge_setlink - default ndo bridge setlink
- * op for master devices
+ * switchdev_port_bridge_dellink - Delete bridge port attributes
*
* @dev: port device
- * @nlh: netlink msg with bridge port attributes
- * @flags: bridge setlink flags
+ * @nlh: netlink header
+ * @flags: netlink flags
*
- * Notify master device slaves of bridge port attributes
+ * Called for SELF on rtnl_bridge_dellink to delete bridge port
+ * attributes.
*/
-int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags)
+int switchdev_port_bridge_dellink(struct net_device *dev,
+ struct nlmsghdr *nlh, u16 flags)
{
- struct net_device *lower_dev;
- struct list_head *iter;
- int ret = 0, err = 0;
+ struct nlattr *afspec;
- if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
- return ret;
+ afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+ IFLA_AF_SPEC);
+ if (afspec)
+ return switchdev_port_br_afspec(dev, afspec,
+ switchdev_port_obj_del);
- netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = netdev_switch_port_bridge_setlink(lower_dev, nlh, flags);
- if (err && err != -EOPNOTSUPP)
- ret = err;
- }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
- return ret;
+/**
+ * switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
+ *
+ * @ndm: netlink header
+ * @tb: netlink attributes
+ * @dev: port device
+ * @addr: MAC address to add
+ * @vid: VLAN to add
+ * @nlm_flags: netlink flags passed in (NLM_F_*)
+ *
+ * Add FDB entry to switch device.
+ */
+int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid, u16 nlm_flags)
+{
+ struct switchdev_obj obj = {
+ .id = SWITCHDEV_OBJ_PORT_FDB,
+ .u.fdb = {
+ .addr = addr,
+ .vid = vid,
+ },
+ };
+
+ return switchdev_port_obj_add(dev, &obj);
}
-EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_setlink);
+EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
/**
- * ndo_dflt_netdev_switch_port_bridge_dellink - default ndo bridge dellink
- * op for master devices
+ * switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
*
+ * @ndm: netlink header
+ * @tb: netlink attributes
* @dev: port device
- * @nlh: netlink msg with bridge port attributes
- * @flags: bridge dellink flags
+ * @addr: MAC address to delete
+ * @vid: VLAN to delete
*
- * Notify master device slaves of bridge port attribute deletes
+ * Delete FDB entry from switch device.
*/
-int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags)
+int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 vid)
{
- struct net_device *lower_dev;
- struct list_head *iter;
- int ret = 0, err = 0;
+ struct switchdev_obj obj = {
+ .id = SWITCHDEV_OBJ_PORT_FDB,
+ .u.fdb = {
+ .addr = addr,
+ .vid = vid,
+ },
+ };
+
+ return switchdev_port_obj_del(dev, &obj);
+}
+EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
+
+struct switchdev_fdb_dump {
+ struct switchdev_obj obj;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ struct net_device *filter_dev;
+ int idx;
+};
+
+static int switchdev_port_fdb_dump_cb(struct net_device *dev,
+ struct switchdev_obj *obj)
+{
+ struct switchdev_fdb_dump *dump =
+ container_of(obj, struct switchdev_fdb_dump, obj);
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+ struct net_device *master = netdev_master_upper_dev_get(dev);
+
+ if (dump->idx < dump->cb->args[0])
+ goto skip;
+
+ if (master && dump->filter_dev != master)
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dev->ifindex;
+ ndm->ndm_state = NUD_REACHABLE;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
+ goto nla_put_failure;
+
+ if (obj->u.fdb.vid && nla_put_u16(dump->skb, NDA_VLAN, obj->u.fdb.vid))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
- if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
- return ret;
+/**
+ * switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
+ *
+ * @skb: netlink skb
+ * @cb: netlink callback
+ * @dev: port device
+ * @filter_dev: filter device
+ * @idx: index to start dumping at
+ *
+ * Dump FDB entries from switch device.
+ */
+int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev, int idx)
+{
+ struct switchdev_fdb_dump dump = {
+ .obj = {
+ .id = SWITCHDEV_OBJ_PORT_FDB,
+ .cb = switchdev_port_fdb_dump_cb,
+ },
+ .skb = skb,
+ .cb = cb,
+ .filter_dev = filter_dev,
+ .idx = idx,
+ };
+ int err;
- netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = netdev_switch_port_bridge_dellink(lower_dev, nlh, flags);
- if (err && err != -EOPNOTSUPP)
- ret = err;
- }
+ err = switchdev_port_obj_dump(dev, &dump.obj);
+ if (err)
+ return err;
- return ret;
+ return dump.idx;
}
-EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_dellink);
+EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
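
Since the three FDB helpers above match the ndo_fdb_* signatures, a port
driver can wire them in directly (a sketch; 'foo' is hypothetical):

    static const struct net_device_ops foo_netdev_ops = {
            .ndo_fdb_add  = switchdev_port_fdb_add,
            .ndo_fdb_del  = switchdev_port_fdb_del,
            .ndo_fdb_dump = switchdev_port_fdb_dump,
    };
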
-static struct net_device *netdev_switch_get_lowest_dev(struct net_device *dev)
+static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
{
- const struct swdev_ops *ops = dev->swdev_ops;
+ const struct switchdev_ops *ops = dev->switchdev_ops;
struct net_device *lower_dev;
struct net_device *port_dev;
struct list_head *iter;
/* Recursively search down until we find a sw port dev.
- * (A sw port dev supports swdev_parent_id_get).
+ * (A sw port dev supports switchdev_port_attr_get).
*/
- if (dev->features & NETIF_F_HW_SWITCH_OFFLOAD &&
- ops && ops->swdev_parent_id_get)
+ if (ops && ops->switchdev_port_attr_get)
return dev;
netdev_for_each_lower_dev(dev, lower_dev, iter) {
- port_dev = netdev_switch_get_lowest_dev(lower_dev);
+ port_dev = switchdev_get_lowest_dev(lower_dev);
if (port_dev)
return port_dev;
}
@@ -261,10 +766,12 @@ static struct net_device *netdev_switch_get_lowest_dev(struct net_device *dev)
return NULL;
}
-static struct net_device *netdev_switch_get_dev_by_nhs(struct fib_info *fi)
+static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
{
- struct netdev_phys_item_id psid;
- struct netdev_phys_item_id prev_psid;
+ struct switchdev_attr attr = {
+ .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+ };
+ struct switchdev_attr prev_attr;
struct net_device *dev = NULL;
int nhsel;
@@ -276,28 +783,29 @@ static struct net_device *netdev_switch_get_dev_by_nhs(struct fib_info *fi)
if (!nh->nh_dev)
return NULL;
- dev = netdev_switch_get_lowest_dev(nh->nh_dev);
+ dev = switchdev_get_lowest_dev(nh->nh_dev);
if (!dev)
return NULL;
- if (netdev_switch_parent_id_get(dev, &psid))
+ if (switchdev_port_attr_get(dev, &attr))
return NULL;
if (nhsel > 0) {
- if (prev_psid.id_len != psid.id_len)
+ if (prev_attr.u.ppid.id_len != attr.u.ppid.id_len)
return NULL;
- if (memcmp(prev_psid.id, psid.id, psid.id_len))
+ if (memcmp(prev_attr.u.ppid.id, attr.u.ppid.id,
+ attr.u.ppid.id_len))
return NULL;
}
- prev_psid = psid;
+ prev_attr = attr;
}
return dev;
}
/**
- * netdev_switch_fib_ipv4_add - Add IPv4 route entry to switch
+ * switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
*
* @dst: route's IPv4 destination address
* @dst_len: destination address length (prefix length)
@@ -307,13 +815,24 @@ static struct net_device *netdev_switch_get_dev_by_nhs(struct fib_info *fi)
* @nlflags: netlink flags passed in (NLM_F_*)
* @tb_id: route table ID
*
- * Add IPv4 route entry to switch device.
+ * Add/modify switch IPv4 route entry.
*/
-int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 nlflags, u32 tb_id)
+int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 nlflags, u32 tb_id)
{
+ struct switchdev_obj fib_obj = {
+ .id = SWITCHDEV_OBJ_IPV4_FIB,
+ .u.ipv4_fib = {
+ .dst = dst,
+ .dst_len = dst_len,
+ .fi = fi,
+ .tos = tos,
+ .type = type,
+ .nlflags = nlflags,
+ .tb_id = tb_id,
+ },
+ };
struct net_device *dev;
- const struct swdev_ops *ops;
int err = 0;
/* Don't offload route if using custom ip rules or if
@@ -328,25 +847,20 @@ int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
if (fi->fib_net->ipv4.fib_offload_disabled)
return 0;
- dev = netdev_switch_get_dev_by_nhs(fi);
+ dev = switchdev_get_dev_by_nhs(fi);
if (!dev)
return 0;
- ops = dev->swdev_ops;
-
- if (ops->swdev_fib_ipv4_add) {
- err = ops->swdev_fib_ipv4_add(dev, htonl(dst), dst_len,
- fi, tos, type, nlflags,
- tb_id);
- if (!err)
- fi->fib_flags |= RTNH_F_OFFLOAD;
- }
- return err;
+ err = switchdev_port_obj_add(dev, &fib_obj);
+ if (!err)
+ fi->fib_flags |= RTNH_F_OFFLOAD;
+
+ return err == -EOPNOTSUPP ? 0 : err;
}
-EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_add);
+EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
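
On the receiving end, the FIB entry arrives as just another switchdev
object, so a driver's single obj_add callback can dispatch on obj->id
(a sketch; the 'foo_*' helpers are hypothetical):

    static int foo_port_obj_add(struct net_device *dev,
                                struct switchdev_obj *obj)
    {
            switch (obj->id) {
            case SWITCHDEV_OBJ_PORT_VLAN:
                    return foo_vlan_add(dev, &obj->u.vlan, obj->trans);
            case SWITCHDEV_OBJ_IPV4_FIB:
                    return foo_fib4_add(dev, &obj->u.ipv4_fib, obj->trans);
            default:
                    return -EOPNOTSUPP;
            }
    }
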
/**
- * netdev_switch_fib_ipv4_del - Delete IPv4 route entry from switch
+ * switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
*
* @dst: route's IPv4 destination address
* @dst_len: destination address length (prefix length)
@@ -357,38 +871,45 @@ EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_add);
*
* Delete IPv4 route entry from switch device.
*/
-int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id)
+int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+ u8 tos, u8 type, u32 tb_id)
{
+ struct switchdev_obj fib_obj = {
+ .id = SWITCHDEV_OBJ_IPV4_FIB,
+ .u.ipv4_fib = {
+ .dst = dst,
+ .dst_len = dst_len,
+ .fi = fi,
+ .tos = tos,
+ .type = type,
+ .nlflags = 0,
+ .tb_id = tb_id,
+ },
+ };
struct net_device *dev;
- const struct swdev_ops *ops;
int err = 0;
if (!(fi->fib_flags & RTNH_F_OFFLOAD))
return 0;
- dev = netdev_switch_get_dev_by_nhs(fi);
+ dev = switchdev_get_dev_by_nhs(fi);
if (!dev)
return 0;
- ops = dev->swdev_ops;
- if (ops->swdev_fib_ipv4_del) {
- err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len,
- fi, tos, type, tb_id);
- if (!err)
- fi->fib_flags &= ~RTNH_F_OFFLOAD;
- }
+ err = switchdev_port_obj_del(dev, &fib_obj);
+ if (!err)
+ fi->fib_flags &= ~RTNH_F_OFFLOAD;
- return err;
+ return err == -EOPNOTSUPP ? 0 : err;
}
-EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_del);
+EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
/**
- * netdev_switch_fib_ipv4_abort - Abort an IPv4 FIB operation
+ * switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
*
* @fi: route FIB info structure
*/
-void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
/* There was a problem installing this route to the offload
* device. For now, until we come up with more refined
@@ -401,4 +922,4 @@ void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
fib_flush_external(fi->fib_net);
fi->fib_net->ipv4.fib_offload_disabled = true;
}
-EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_abort);
+EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index ba7daa864d44..48fd3b5a73fb 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -38,13 +38,6 @@
#include "addr.h"
#include "core.h"
-u32 tipc_own_addr(struct net *net)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
- return tn->own_addr;
-}
-
/**
* in_own_cluster - test for cluster inclusion; <0.0.0> always matches
*/
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 7ba6d5c8ae40..93f7c983be33 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -41,10 +41,18 @@
#include <linux/tipc.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include "core.h"
#define TIPC_ZONE_MASK 0xff000000u
#define TIPC_CLUSTER_MASK 0xfffff000u
+static inline u32 tipc_own_addr(struct net *net)
+{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+ return tn->own_addr;
+}
+
static inline u32 tipc_zone_mask(u32 addr)
{
return addr & TIPC_ZONE_MASK;
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index c5cbdcb1f0b5..4906ca3c0f3a 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -115,19 +115,15 @@ static void bclink_set_last_sent(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_link *bcl = tn->bcl;
- struct sk_buff *skb = skb_peek(&bcl->backlogq);
- if (skb)
- bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1);
- else
- bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
+ bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}
u32 tipc_bclink_get_last_sent(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- return tn->bcl->fsm_msg_cnt;
+ return tn->bcl->silent_intv_cnt;
}
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
@@ -212,16 +208,16 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
* or both sent and unsent messages (otherwise)
*/
if (tn->bclink->bcast_nodes.count)
- acked = tn->bcl->fsm_msg_cnt;
+ acked = tn->bcl->silent_intv_cnt;
else
- acked = tn->bcl->next_out_no;
+ acked = tn->bcl->snd_nxt;
} else {
/*
* Bail out if specified sequence number does not correspond
* to a message that has been sent and not yet acknowledged
*/
if (less(acked, buf_seqno(skb)) ||
- less(tn->bcl->fsm_msg_cnt, acked) ||
+ less(tn->bcl->silent_intv_cnt, acked) ||
less_eq(acked, n_ptr->bclink.acked))
goto exit;
}
@@ -803,9 +799,9 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
goto attr_msg_full;
if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
goto attr_msg_full;
prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
@@ -866,6 +862,27 @@ int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
return 0;
}
+int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
+{
+ int err;
+ u32 win;
+ struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+ if (!attrs[TIPC_NLA_LINK_PROP])
+ return -EINVAL;
+
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
+ if (err)
+ return err;
+
+ if (!props[TIPC_NLA_PROP_WIN])
+ return -EOPNOTSUPP;
+
+ win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+
+ return tipc_bclink_set_queue_limits(net, win);
+}
+
int tipc_bclink_init(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
@@ -893,7 +910,7 @@ int tipc_bclink_init(struct net *net)
__skb_queue_head_init(&bcl->backlogq);
__skb_queue_head_init(&bcl->deferdq);
skb_queue_head_init(&bcl->wakeupq);
- bcl->next_out_no = 1;
+ bcl->snd_nxt = 1;
spin_lock_init(&bclink->node.lock);
__skb_queue_head_init(&bclink->arrvq);
skb_queue_head_init(&bclink->inputq);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 4bdc12277d33..3c290a48f720 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -131,6 +131,7 @@ uint tipc_bclink_get_mtu(void);
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list);
void tipc_bclink_wakeup_users(struct net *net);
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
+int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
void tipc_bclink_input(struct net *net);
#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 70e3dacbf84a..00bc0e620532 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -71,8 +71,7 @@ static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
[TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED }
};
-static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
- bool shutting_down);
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr);
/**
* tipc_media_find - locates specified media object by name
@@ -324,7 +323,7 @@ restart:
res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr);
if (res) {
- bearer_disable(net, b_ptr, false);
+ bearer_disable(net, b_ptr);
pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
name);
return -EINVAL;
@@ -344,7 +343,7 @@ restart:
static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
{
pr_info("Resetting bearer <%s>\n", b_ptr->name);
- tipc_link_reset_list(net, b_ptr->identity);
+ tipc_link_delete_list(net, b_ptr->identity);
tipc_disc_reset(net, b_ptr);
return 0;
}
@@ -354,8 +353,7 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
*
* Note: This routine assumes caller holds RTNL lock.
*/
-static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
- bool shutting_down)
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
u32 i;
@@ -363,7 +361,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
pr_info("Disabling bearer <%s>\n", b_ptr->name);
b_ptr->media->disable_media(b_ptr);
- tipc_link_delete_list(net, b_ptr->identity, shutting_down);
+ tipc_link_delete_list(net, b_ptr->identity);
if (b_ptr->link_req)
tipc_disc_delete(b_ptr->link_req);
@@ -541,7 +539,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
break;
case NETDEV_UNREGISTER:
case NETDEV_CHANGENAME:
- bearer_disable(dev_net(dev), b_ptr, false);
+ bearer_disable(dev_net(dev), b_ptr);
break;
}
return NOTIFY_OK;
@@ -583,7 +581,7 @@ void tipc_bearer_stop(struct net *net)
for (i = 0; i < MAX_BEARERS; i++) {
b_ptr = rtnl_dereference(tn->bearer_list[i]);
if (b_ptr) {
- bearer_disable(net, b_ptr, true);
+ bearer_disable(net, b_ptr);
tn->bearer_list[i] = NULL;
}
}
@@ -747,7 +745,7 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
- bearer_disable(net, bearer, false);
+ bearer_disable(net, bearer);
rtnl_unlock();
return 0;
@@ -812,7 +810,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
char *name;
struct tipc_bearer *b;
struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
- struct net *net = genl_info_net(info);
+ struct net *net = sock_net(skb->sk);
if (!info->attrs[TIPC_NLA_BEARER])
return -EINVAL;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 5cad243ee8fc..dc714d977768 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -38,9 +38,9 @@
#define _TIPC_BEARER_H
#include "netlink.h"
+#include "core.h"
#include <net/genetlink.h>
-#define MAX_BEARERS 2
#define MAX_MEDIA 3
#define MAX_NODES 4096
#define WSIZE 32
diff --git a/net/tipc/core.c b/net/tipc/core.c
index be1c9fa60b09..005ba5eb0ea4 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -68,7 +68,7 @@ static int __net_init tipc_init_net(struct net *net)
if (err)
goto out_nametbl;
- err = tipc_subscr_start(net);
+ err = tipc_topsrv_start(net);
if (err)
goto out_subscr;
return 0;
@@ -83,7 +83,7 @@ out_sk_rht:
static void __net_exit tipc_exit_net(struct net *net)
{
- tipc_subscr_stop(net);
+ tipc_topsrv_stop(net);
tipc_net_stop(net);
tipc_nametbl_stop(net);
tipc_sk_rht_destroy(net);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 3dc68c7a966d..0fcf133d5cb7 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -60,16 +60,19 @@
#include <net/netns/generic.h>
#include <linux/rhashtable.h>
-#include "node.h"
-#include "bearer.h"
-#include "bcast.h"
-#include "netlink.h"
-#include "link.h"
-#include "node.h"
-#include "msg.h"
+struct tipc_node;
+struct tipc_bearer;
+struct tipc_bcbearer;
+struct tipc_bclink;
+struct tipc_link;
+struct tipc_name_table;
+struct tipc_server;
#define TIPC_MOD_VER "2.0.0"
+#define NODE_HTABLE_SIZE 512
+#define MAX_BEARERS 3
+
extern int tipc_net_id __read_mostly;
extern int sysctl_tipc_rmem[3] __read_mostly;
extern int sysctl_tipc_named_timeout __read_mostly;
@@ -106,6 +109,26 @@ struct tipc_net {
atomic_t subscription_count;
};
+static inline u16 mod(u16 x)
+{
+ return x & 0xffffu;
+}
+
+static inline int less_eq(u16 left, u16 right)
+{
+ return mod(right - left) < 32768u;
+}
+
+static inline int more(u16 left, u16 right)
+{
+ return !less_eq(left, right);
+}
+
+static inline int less(u16 left, u16 right)
+{
+ return less_eq(left, right) && (mod(right) != mod(left));
+}
+
#ifdef CONFIG_SYSCTL
int tipc_register_sysctl(void);
void tipc_unregister_sysctl(void);
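
A worked example of the 16-bit sequence-number helpers above; the
half-range window keeps comparisons correct across wraparound:

    less(0xfffe, 0x0001)  -> true   /* 0x0001 - 0xfffe == 3 < 32768      */
    less(0x0001, 0xfffe)  -> false  /* 0xfffe - 0x0001 == 0xfffd >= 32768 */
    less_eq(42, 42)       -> true   /* 0 < 32768, equality allowed        */
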
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 43a515dc97b0..ca8b8e0f49b5 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -86,7 +86,7 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
*/
#define STARTING_EVT 856384768 /* link processing trigger */
#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
-#define TIMEOUT_EVT 560817u /* link timer expired */
+#define SILENCE_EVT 560817u /* timer discovered silence from peer */
/*
* State value stored in 'failover_pkts'
@@ -106,6 +106,7 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
+static void link_set_timer(struct tipc_link *link, unsigned long time);
/*
* Simple link routines
*/
@@ -197,11 +198,12 @@ static void link_timeout(unsigned long data)
}
/* do all other link processing performed on a periodic basis */
- link_state_event(l_ptr, TIMEOUT_EVT);
-
+ if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
+ link_state_event(l_ptr, SILENCE_EVT);
+ l_ptr->silent_intv_cnt++;
if (skb_queue_len(&l_ptr->backlogq))
tipc_link_push_packets(l_ptr);
-
+ link_set_timer(l_ptr, l_ptr->keepalive_intv);
tipc_node_unlock(l_ptr->owner);
tipc_link_put(l_ptr);
}
@@ -233,8 +235,8 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
if (n_ptr->link_cnt >= MAX_BEARERS) {
tipc_addr_string_fill(addr_string, n_ptr->addr);
- pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
- n_ptr->link_cnt, addr_string, MAX_BEARERS);
+ pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
+ n_ptr->link_cnt, addr_string, MAX_BEARERS);
return NULL;
}
@@ -261,7 +263,6 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
/* note: peer i/f name is updated by reset/activate message */
memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
l_ptr->owner = n_ptr;
- l_ptr->checkpoint = 1;
l_ptr->peer_session = INVALID_SESSION;
l_ptr->bearer_id = b_ptr->identity;
link_set_supervision_props(l_ptr, b_ptr->tolerance);
@@ -280,7 +281,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
l_ptr->mtu = l_ptr->advertised_mtu;
l_ptr->priority = b_ptr->priority;
tipc_link_set_queue_limits(l_ptr, b_ptr->window);
- l_ptr->next_out_no = 1;
+ l_ptr->snd_nxt = 1;
__skb_queue_head_init(&l_ptr->transmq);
__skb_queue_head_init(&l_ptr->backlogq);
__skb_queue_head_init(&l_ptr->deferdq);
@@ -311,8 +312,7 @@ void tipc_link_delete(struct tipc_link *l)
tipc_link_put(l);
}
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
- bool shutting_down)
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_link *link;
@@ -451,9 +451,9 @@ void tipc_link_reset(struct tipc_link *l_ptr)
if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
l_ptr->flags |= LINK_FAILINGOVER;
- l_ptr->failover_checkpt = l_ptr->next_in_no;
+ l_ptr->failover_checkpt = l_ptr->rcv_nxt;
pl->failover_pkts = FIRST_FAILOVER;
- pl->failover_checkpt = l_ptr->next_in_no;
+ pl->failover_checkpt = l_ptr->rcv_nxt;
pl->failover_skb = l_ptr->reasm_buf;
} else {
kfree_skb(l_ptr->reasm_buf);
@@ -469,36 +469,19 @@ void tipc_link_reset(struct tipc_link *l_ptr)
tipc_link_purge_backlog(l_ptr);
l_ptr->reasm_buf = NULL;
l_ptr->rcv_unacked = 0;
- l_ptr->checkpoint = 1;
- l_ptr->next_out_no = 1;
- l_ptr->fsm_msg_cnt = 0;
+ l_ptr->snd_nxt = 1;
+ l_ptr->silent_intv_cnt = 0;
l_ptr->stale_count = 0;
link_reset_statistics(l_ptr);
}
-void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_link *l_ptr;
- struct tipc_node *n_ptr;
-
- rcu_read_lock();
- list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
- tipc_node_lock(n_ptr);
- l_ptr = n_ptr->links[bearer_id];
- if (l_ptr)
- tipc_link_reset(l_ptr);
- tipc_node_unlock(n_ptr);
- }
- rcu_read_unlock();
-}
-
static void link_activate(struct tipc_link *link)
{
struct tipc_node *node = link->owner;
- link->next_in_no = 1;
+ link->rcv_nxt = 1;
link->stats.recv_info = 1;
+ link->silent_intv_cnt = 0;
tipc_node_link_up(node, link);
tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
}
@@ -511,7 +494,7 @@ static void link_activate(struct tipc_link *link)
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
struct tipc_link *other;
- unsigned long cont_intv = l_ptr->cont_intv;
+ unsigned long timer_intv = l_ptr->keepalive_intv;
if (l_ptr->flags & LINK_STOPPED)
return;
@@ -519,45 +502,33 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
return; /* Not yet. */
- if (l_ptr->flags & LINK_FAILINGOVER) {
- if (event == TIMEOUT_EVT)
- link_set_timer(l_ptr, cont_intv);
+ if (l_ptr->flags & LINK_FAILINGOVER)
return;
- }
switch (l_ptr->state) {
case WORKING_WORKING:
switch (event) {
case TRAFFIC_MSG_EVT:
case ACTIVATE_MSG:
+ l_ptr->silent_intv_cnt = 0;
break;
- case TIMEOUT_EVT:
- if (l_ptr->next_in_no != l_ptr->checkpoint) {
- l_ptr->checkpoint = l_ptr->next_in_no;
- if (tipc_bclink_acks_missing(l_ptr->owner)) {
+ case SILENCE_EVT:
+ if (!l_ptr->silent_intv_cnt) {
+ if (tipc_bclink_acks_missing(l_ptr->owner))
tipc_link_proto_xmit(l_ptr, STATE_MSG,
0, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
- }
- link_set_timer(l_ptr, cont_intv);
break;
}
l_ptr->state = WORKING_UNKNOWN;
- l_ptr->fsm_msg_cnt = 0;
tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
- link_set_timer(l_ptr, cont_intv / 4);
break;
case RESET_MSG:
pr_debug("%s<%s>, requested by peer\n",
link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
- l_ptr->fsm_msg_cnt = 0;
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
0, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
- link_set_timer(l_ptr, cont_intv);
break;
default:
pr_debug("%s%u in WW state\n", link_unk_evt, event);
@@ -568,46 +539,33 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
case TRAFFIC_MSG_EVT:
case ACTIVATE_MSG:
l_ptr->state = WORKING_WORKING;
- l_ptr->fsm_msg_cnt = 0;
- link_set_timer(l_ptr, cont_intv);
+ l_ptr->silent_intv_cnt = 0;
break;
case RESET_MSG:
pr_debug("%s<%s>, requested by peer while probing\n",
link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
- l_ptr->fsm_msg_cnt = 0;
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
0, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
- link_set_timer(l_ptr, cont_intv);
break;
- case TIMEOUT_EVT:
- if (l_ptr->next_in_no != l_ptr->checkpoint) {
+ case SILENCE_EVT:
+ if (!l_ptr->silent_intv_cnt) {
l_ptr->state = WORKING_WORKING;
- l_ptr->fsm_msg_cnt = 0;
- l_ptr->checkpoint = l_ptr->next_in_no;
- if (tipc_bclink_acks_missing(l_ptr->owner)) {
+ if (tipc_bclink_acks_missing(l_ptr->owner))
tipc_link_proto_xmit(l_ptr, STATE_MSG,
0, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
- }
- link_set_timer(l_ptr, cont_intv);
- } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
+ } else if (l_ptr->silent_intv_cnt <
+ l_ptr->abort_limit) {
tipc_link_proto_xmit(l_ptr, STATE_MSG,
1, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
- link_set_timer(l_ptr, cont_intv / 4);
} else { /* Link has failed */
pr_debug("%s<%s>, peer not responding\n",
link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_UNKNOWN;
- l_ptr->fsm_msg_cnt = 0;
tipc_link_proto_xmit(l_ptr, RESET_MSG,
0, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
- link_set_timer(l_ptr, cont_intv);
}
break;
default:
@@ -623,31 +581,22 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
if (other && link_working_unknown(other))
break;
l_ptr->state = WORKING_WORKING;
- l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
if (l_ptr->owner->working_links == 1)
tipc_link_sync_xmit(l_ptr);
- link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
l_ptr->state = RESET_RESET;
- l_ptr->fsm_msg_cnt = 0;
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
1, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
- link_set_timer(l_ptr, cont_intv);
break;
case STARTING_EVT:
l_ptr->flags |= LINK_STARTED;
- l_ptr->fsm_msg_cnt++;
- link_set_timer(l_ptr, cont_intv);
+ link_set_timer(l_ptr, timer_intv);
break;
- case TIMEOUT_EVT:
+ case SILENCE_EVT:
tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
- link_set_timer(l_ptr, cont_intv);
break;
default:
pr_err("%s%u in RU state\n", link_unk_evt, event);
@@ -661,21 +610,16 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
if (other && link_working_unknown(other))
break;
l_ptr->state = WORKING_WORKING;
- l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
if (l_ptr->owner->working_links == 1)
tipc_link_sync_xmit(l_ptr);
- link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
break;
- case TIMEOUT_EVT:
+ case SILENCE_EVT:
tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
0, 0, 0, 0);
- l_ptr->fsm_msg_cnt++;
- link_set_timer(l_ptr, cont_intv);
break;
default:
pr_err("%s%u in RR state\n", link_unk_evt, event);
@@ -701,53 +645,58 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
{
struct tipc_msg *msg = buf_msg(skb_peek(list));
unsigned int maxwin = link->window;
- unsigned int imp = msg_importance(msg);
+ unsigned int i, imp = msg_importance(msg);
uint mtu = link->mtu;
- uint ack = mod(link->next_in_no - 1);
- uint seqno = link->next_out_no;
- uint bc_last_in = link->owner->bclink.last_in;
+ u16 ack = mod(link->rcv_nxt - 1);
+ u16 seqno = link->snd_nxt;
+ u16 bc_last_in = link->owner->bclink.last_in;
struct tipc_media_addr *addr = &link->media_addr;
struct sk_buff_head *transmq = &link->transmq;
struct sk_buff_head *backlogq = &link->backlogq;
- struct sk_buff *skb, *tmp;
-
- /* Match backlog limit against msg importance: */
- if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
- return link_schedule_user(link, list);
+ struct sk_buff *skb, *bskb;
+ /* Match msg importance against this and all higher backlog limits: */
+ for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+ if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
+ return link_schedule_user(link, list);
+ }
if (unlikely(msg_size(msg) > mtu)) {
__skb_queue_purge(list);
return -EMSGSIZE;
}
/* Prepare each packet for sending, and add to relevant queue: */
- skb_queue_walk_safe(list, skb, tmp) {
- __skb_unlink(skb, list);
+ while (skb_queue_len(list)) {
+ skb = skb_peek(list);
msg = buf_msg(skb);
msg_set_seqno(msg, seqno);
msg_set_ack(msg, ack);
msg_set_bcast_ack(msg, bc_last_in);
if (likely(skb_queue_len(transmq) < maxwin)) {
+ __skb_dequeue(list);
__skb_queue_tail(transmq, skb);
tipc_bearer_send(net, link->bearer_id, skb, addr);
link->rcv_unacked = 0;
seqno++;
continue;
}
- if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
+ if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
+ kfree_skb(__skb_dequeue(list));
link->stats.sent_bundled++;
continue;
}
- if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
+ if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
+ kfree_skb(__skb_dequeue(list));
+ __skb_queue_tail(backlogq, bskb);
+ link->backlog[msg_importance(buf_msg(bskb))].len++;
link->stats.sent_bundled++;
link->stats.sent_bundles++;
- imp = msg_importance(buf_msg(skb));
+ continue;
}
- __skb_queue_tail(backlogq, skb);
- link->backlog[imp].len++;
- seqno++;
+ link->backlog[imp].len += skb_queue_len(list);
+ skb_queue_splice_tail_init(list, backlogq);
}
- link->next_out_no = seqno;
+ link->snd_nxt = seqno;
return 0;
}
@@ -877,7 +826,8 @@ void tipc_link_push_packets(struct tipc_link *link)
{
struct sk_buff *skb;
struct tipc_msg *msg;
- unsigned int ack = mod(link->next_in_no - 1);
+ u16 seqno = link->snd_nxt;
+ u16 ack = mod(link->rcv_nxt - 1);
while (skb_queue_len(&link->transmq) < link->window) {
skb = __skb_dequeue(&link->backlogq);
@@ -886,12 +836,15 @@ void tipc_link_push_packets(struct tipc_link *link)
msg = buf_msg(skb);
link->backlog[msg_importance(msg)].len--;
msg_set_ack(msg, ack);
+ msg_set_seqno(msg, seqno);
+ seqno = mod(seqno + 1);
msg_set_bcast_ack(msg, link->owner->bclink.last_in);
link->rcv_unacked = 0;
__skb_queue_tail(&link->transmq, skb);
tipc_bearer_send(link->owner->net, link->bearer_id,
skb, &link->media_addr);
}
+ link->snd_nxt = seqno;
}
void tipc_link_reset_all(struct tipc_node *node)
@@ -964,13 +917,13 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
msg = buf_msg(skb);
/* Detect repeated retransmit failures */
- if (l_ptr->last_retransmitted == msg_seqno(msg)) {
+ if (l_ptr->last_retransm == msg_seqno(msg)) {
if (++l_ptr->stale_count > 100) {
link_retransmit_failure(l_ptr, skb);
return;
}
} else {
- l_ptr->last_retransmitted = msg_seqno(msg);
+ l_ptr->last_retransm = msg_seqno(msg);
l_ptr->stale_count = 1;
}
@@ -978,7 +931,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
if (!retransmits)
break;
msg = buf_msg(skb);
- msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+ msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
&l_ptr->media_addr);
@@ -1001,11 +954,11 @@ static bool link_synch(struct tipc_link *l)
goto synched;
/* Was last pre-synch packet added to input queue ? */
- if (less_eq(pl->next_in_no, l->synch_point))
+ if (less_eq(pl->rcv_nxt, l->synch_point))
return false;
/* Is it still in the input queue ? */
- post_synch = mod(pl->next_in_no - l->synch_point) - 1;
+ post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
if (skb_queue_len(&pl->inputq) > post_synch)
return false;
synched:
@@ -1016,13 +969,13 @@ synched:
static void link_retrieve_defq(struct tipc_link *link,
struct sk_buff_head *list)
{
- u32 seq_no;
+ u16 seq_no;
if (skb_queue_empty(&link->deferdq))
return;
seq_no = buf_seqno(skb_peek(&link->deferdq));
- if (seq_no == mod(link->next_in_no))
+ if (seq_no == link->rcv_nxt)
skb_queue_splice_tail_init(&link->deferdq, list);
}
@@ -1043,8 +996,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
struct tipc_link *l_ptr;
struct sk_buff *skb1, *tmp;
struct tipc_msg *msg;
- u32 seq_no;
- u32 ackd;
+ u16 seq_no;
+ u16 ackd;
u32 released;
skb2list(skb, &head);
@@ -1137,18 +1090,20 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
}
/* Link is now in state WORKING_WORKING */
- if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
+ if (unlikely(seq_no != l_ptr->rcv_nxt)) {
link_handle_out_of_seq_msg(l_ptr, skb);
link_retrieve_defq(l_ptr, &head);
skb = NULL;
goto unlock;
}
+ l_ptr->silent_intv_cnt = 0;
+
/* Synchronize with parallel link if applicable */
if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
if (!link_synch(l_ptr))
goto unlock;
}
- l_ptr->next_in_no++;
+ l_ptr->rcv_nxt++;
if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
link_retrieve_defq(l_ptr, &head);
if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
@@ -1268,7 +1223,7 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
struct sk_buff *skb1;
- u32 seq_no = buf_seqno(skb);
+ u16 seq_no = buf_seqno(skb);
/* Empty queue ? */
if (skb_queue_empty(list)) {
@@ -1284,7 +1239,7 @@ u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
/* Locate insertion point in queue, then insert; discard if duplicate */
skb_queue_walk(list, skb1) {
- u32 curr_seqno = buf_seqno(skb1);
+ u16 curr_seqno = buf_seqno(skb1);
if (seq_no == curr_seqno) {
kfree_skb(skb);
@@ -1312,14 +1267,14 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
return;
}
- /* Record OOS packet arrival (force mismatch on next timeout) */
- l_ptr->checkpoint--;
+ /* Record OOS packet arrival */
+ l_ptr->silent_intv_cnt = 0;
/*
* Discard packet if a duplicate; otherwise add it to deferred queue
* and notify peer of gap as per protocol specification
*/
- if (less(seq_no, mod(l_ptr->next_in_no))) {
+ if (less(seq_no, l_ptr->rcv_nxt)) {
l_ptr->stats.duplicates++;
kfree_skb(buf);
return;
@@ -1344,6 +1299,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
struct tipc_msg *msg = l_ptr->pmsg;
u32 msg_size = sizeof(l_ptr->proto_msg);
int r_flag;
+ u16 last_rcv;
/* Don't send protocol message during link failover */
if (l_ptr->flags & LINK_FAILINGOVER)
@@ -1360,16 +1316,14 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
if (msg_typ == STATE_MSG) {
- u32 next_sent = mod(l_ptr->next_out_no);
+ u16 next_sent = l_ptr->snd_nxt;
if (!tipc_link_is_up(l_ptr))
return;
- if (skb_queue_len(&l_ptr->backlogq))
- next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
msg_set_next_sent(msg, next_sent);
if (!skb_queue_empty(&l_ptr->deferdq)) {
- u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
- gap = mod(rec - mod(l_ptr->next_in_no));
+ last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
+ gap = mod(last_rcv - l_ptr->rcv_nxt);
}
msg_set_seq_gap(msg, gap);
if (gap)
@@ -1377,7 +1331,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
msg_set_link_tolerance(msg, tolerance);
msg_set_linkprio(msg, priority);
msg_set_max_pkt(msg, l_ptr->mtu);
- msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+ msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
msg_set_probe(msg, probe_msg != 0);
if (probe_msg)
l_ptr->stats.sent_probes++;
@@ -1397,7 +1351,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
msg_set_linkprio(msg, l_ptr->priority);
msg_set_size(msg, msg_size);
- msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
+ msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
buf = tipc_buf_acquire(msg_size);
if (!buf)
@@ -1496,17 +1450,15 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
}
- /* Record reception; force mismatch at next timeout: */
- l_ptr->checkpoint--;
+ /* Record reception: */
+ l_ptr->silent_intv_cnt = 0;
link_state_event(l_ptr, TRAFFIC_MSG_EVT);
l_ptr->stats.recv_states++;
if (link_reset_unknown(l_ptr))
break;
- if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
- rec_gap = mod(msg_next_sent(msg) -
- mod(l_ptr->next_in_no));
- }
+ if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
+ rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);
if (msg_probe(msg))
l_ptr->stats.recv_probes++;
@@ -1580,6 +1532,11 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
+
+ skb_queue_walk(&l_ptr->backlogq, skb) {
+ msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
+ l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
+ }
skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
tipc_link_purge_backlog(l_ptr);
msgcount = skb_queue_len(&l_ptr->transmq);
@@ -1640,6 +1597,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *link,
struct tipc_msg tnl_hdr;
struct sk_buff_head *queue = &link->transmq;
int mcnt;
+ u16 seqno;
tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
SYNCH_MSG, INT_H_SIZE, link->addr);
@@ -1653,7 +1611,7 @@ tunnel_queue:
struct tipc_msg *msg = buf_msg(skb);
u32 len = msg_size(msg);
- msg_set_ack(msg, mod(link->next_in_no - 1));
+ msg_set_ack(msg, mod(link->rcv_nxt - 1));
msg_set_bcast_ack(msg, link->owner->bclink.last_in);
msg_set_size(&tnl_hdr, len + INT_H_SIZE);
outskb = tipc_buf_acquire(len + INT_H_SIZE);
@@ -1671,6 +1629,11 @@ tunnel_queue:
}
if (queue == &link->backlogq)
return;
+ seqno = link->snd_nxt;
+ skb_queue_walk(&link->backlogq, skb) {
+ msg_set_seqno(buf_msg(skb), seqno);
+ seqno = mod(seqno + 1);
+ }
queue = &link->backlogq;
goto tunnel_queue;
}
@@ -1742,8 +1705,8 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
return;
l_ptr->tolerance = tol;
- l_ptr->cont_intv = msecs_to_jiffies(intv);
- l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
+ l_ptr->keepalive_intv = msecs_to_jiffies(intv);
+ l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
}
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
@@ -1803,8 +1766,8 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
static void link_reset_statistics(struct tipc_link *l_ptr)
{
memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
- l_ptr->stats.sent_info = l_ptr->next_out_no;
- l_ptr->stats.recv_info = l_ptr->next_in_no;
+ l_ptr->stats.sent_info = l_ptr->snd_nxt;
+ l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}
static void link_print(struct tipc_link *l_ptr, const char *str)
@@ -1893,6 +1856,9 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
+ if (strcmp(name, tipc_bclink_name) == 0)
+ return tipc_nl_bc_link_set(net, attrs);
+
node = tipc_link_find_owner(net, name, &bearer_id);
if (!node)
return -EINVAL;
@@ -2034,9 +2000,9 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
goto attr_msg_full;
if (tipc_link_is_up(link))
@@ -2175,50 +2141,53 @@ out:
int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
- struct sk_buff *ans_skb;
struct tipc_nl_msg msg;
- struct tipc_link *link;
- struct tipc_node *node;
char *name;
- int bearer_id;
int err;
+ msg.portid = info->snd_portid;
+ msg.seq = info->snd_seq;
+
if (!info->attrs[TIPC_NLA_LINK_NAME])
return -EINVAL;
-
name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
- node = tipc_link_find_owner(net, name, &bearer_id);
- if (!node)
- return -EINVAL;
- ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!ans_skb)
+ msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg.skb)
return -ENOMEM;
- msg.skb = ans_skb;
- msg.portid = info->snd_portid;
- msg.seq = info->snd_seq;
-
- tipc_node_lock(node);
- link = node->links[bearer_id];
- if (!link) {
- err = -EINVAL;
- goto err_out;
- }
-
- err = __tipc_nl_add_link(net, &msg, link, 0);
- if (err)
- goto err_out;
+ if (strcmp(name, tipc_bclink_name) == 0) {
+ err = tipc_nl_add_bc_link(net, &msg);
+ if (err) {
+ nlmsg_free(msg.skb);
+ return err;
+ }
+ } else {
+ int bearer_id;
+ struct tipc_node *node;
+ struct tipc_link *link;
- tipc_node_unlock(node);
+ node = tipc_link_find_owner(net, name, &bearer_id);
+ if (!node)
+ return -EINVAL;
- return genlmsg_reply(ans_skb, info);
+ tipc_node_lock(node);
+ link = node->links[bearer_id];
+ if (!link) {
+ tipc_node_unlock(node);
+ nlmsg_free(msg.skb);
+ return -EINVAL;
+ }
-err_out:
- tipc_node_unlock(node);
- nlmsg_free(ans_skb);
+ err = __tipc_nl_add_link(net, &msg, link, 0);
+ tipc_node_unlock(node);
+ if (err) {
+ nlmsg_free(msg.skb);
+ return err;
+ }
+ }
- return err;
+ return genlmsg_reply(msg.skb, info);
}
int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
diff --git a/net/tipc/link.h b/net/tipc/link.h
index b5b4e3554d4e..0c02c973e985 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -107,30 +107,29 @@ struct tipc_stats {
* @owner: pointer to peer node
* @refcnt: reference counter for permanent references (owner node & timer)
* @flags: execution state flags for link endpoint instance
- * @checkpoint: reference point for triggering link continuity checking
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
* @bearer_id: local bearer id used by link
* @tolerance: minimum link continuity loss needed to reset link [in ms]
- * @cont_intv: link continuity testing interval
+ * @keepalive_intv: link keepalive timer interval
* @abort_limit: # of unacknowledged continuity probes needed to reset link
* @state: current state of link FSM
- * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
+ * @silent_intv_cnt: # of timer intervals without any reception from peer
* @proto_msg: template for control messages generated by link
* @pmsg: convenience pointer to "proto_msg" field
* @priority: current link priority
* @net_plane: current link network plane ('A' through 'H')
* @backlog_limit: backlog queue congestion thresholds (indexed by importance)
* @exp_msg_count: # of tunnelled messages expected during link changeover
- * @reset_checkpoint: seq # of last acknowledged message at time of link reset
+ * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
* @mtu: current maximum packet size for this link
* @advertised_mtu: advertised own mtu when link is being established
* @transmitq: queue for sent, non-acked messages
* @backlogq: queue for messages waiting to be sent
- * @next_out_no: next sequence number to use for outbound messages
+ * @snd_nxt: next sequence number to use for outbound messages
- * @last_retransmitted: sequence number of most recently retransmitted message
+ * @last_retransm: sequence number of most recently retransmitted message
* @stale_count: # of identical retransmit requests made by peer
- * @next_in_no: next sequence number to expect for inbound messages
+ * @rcv_nxt: next sequence number to expect for inbound messages
* @deferred_queue: deferred queue saved OOS b'cast message received from node
* @unacked_window: # of inbound messages rx'd without ack'ing back to peer
* @inputq: buffer queue for messages to be delivered upwards
@@ -151,15 +150,14 @@ struct tipc_link {
/* Management and link supervision data */
unsigned int flags;
- u32 checkpoint;
u32 peer_session;
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
- unsigned long cont_intv;
+ unsigned long keepalive_intv;
u32 abort_limit;
int state;
- u32 fsm_msg_cnt;
+ u32 silent_intv_cnt;
struct {
unchar hdr[INT_H_SIZE];
unchar body[TIPC_MAX_IF_NAME];
@@ -185,13 +183,13 @@ struct tipc_link {
u16 len;
u16 limit;
} backlog[5];
- u32 next_out_no;
+ u16 snd_nxt;
+ u16 last_retransm;
u32 window;
- u32 last_retransmitted;
u32 stale_count;
/* Reception */
- u32 next_in_no;
+ u16 rcv_nxt;
u32 rcv_unacked;
struct sk_buff_head deferdq;
struct sk_buff_head inputq;
@@ -213,8 +211,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
struct tipc_bearer *b_ptr,
const struct tipc_media_addr *media_addr);
void tipc_link_delete(struct tipc_link *link);
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
- bool shutting_down);
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id);
void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
@@ -223,7 +220,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr);
void tipc_link_purge_queues(struct tipc_link *l_ptr);
void tipc_link_reset_all(struct tipc_node *node);
void tipc_link_reset(struct tipc_link *l_ptr);
-void tipc_link_reset_list(struct net *net, unsigned int bearer_id);
int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
u32 selector);
int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
@@ -247,39 +243,6 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
void link_prepare_wakeup(struct tipc_link *l);
-/*
- * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
- */
-static inline u32 buf_seqno(struct sk_buff *buf)
-{
- return msg_seqno(buf_msg(buf));
-}
-
-static inline u32 mod(u32 x)
-{
- return x & 0xffffu;
-}
-
-static inline int less_eq(u32 left, u32 right)
-{
- return mod(right - left) < 32768u;
-}
-
-static inline int more(u32 left, u32 right)
-{
- return !less_eq(left, right);
-}
-
-static inline int less(u32 left, u32 right)
-{
- return less_eq(left, right) && (mod(right) != mod(left));
-}
-
-static inline u32 lesser(u32 left, u32 right)
-{
- return less_eq(left, right) ? left : right;
-}
-
static inline u32 link_own_addr(struct tipc_link *l)
{
return msg_prevnode(l->pmsg);
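
With sequence numbers now stored as u16, the removed mod()/less_eq() helper family reduces to plain unsigned arithmetic: subtraction wraps modulo 2^16 by itself, and ordering is decided by whether the forward distance is under half the sequence space. A standalone sketch of that comparison (the function name below is ours):

    #include <stdint.h>
    #include <stdio.h>

    /* "left <= right" in modulo-2^16 sequence space: true when the
     * forward distance from left to right is under half the space.
     */
    static int seq_less_eq(uint16_t left, uint16_t right)
    {
        return (uint16_t)(right - left) < 32768u;
    }

    int main(void)
    {
        /* 65535 precedes 2 after wraparound, despite being larger. */
        printf("%d\n", seq_less_eq(65535, 2)); /* 1 */
        printf("%d\n", seq_less_eq(2, 65535)); /* 0 */
        return 0;
    }
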
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index c3e96e815418..08b4cc7d496d 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -331,16 +331,15 @@ error:
/**
* tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @bskb: the buffer to append to ("bundle")
- * @skb: buffer to be appended
+ * @skb: the buffer to append to ("bundle")
+ * @msg: message to be appended
* @mtu: max allowable size for the bundle buffer
- * Consumes buffer if successful
+ * Does not consume the message; caller retains ownership of the buffer
* Returns true if bundling could be performed, otherwise false
*/
-bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
+bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
{
struct tipc_msg *bmsg;
- struct tipc_msg *msg = buf_msg(skb);
unsigned int bsz;
unsigned int msz = msg_size(msg);
u32 start, pad;
@@ -348,9 +347,9 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
if (likely(msg_user(msg) == MSG_FRAGMENTER))
return false;
- if (!bskb)
+ if (!skb)
return false;
- bmsg = buf_msg(bskb);
+ bmsg = buf_msg(skb);
bsz = msg_size(bmsg);
start = align(bsz);
pad = start - bsz;
@@ -359,18 +358,20 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
return false;
if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
return false;
- if (likely(msg_user(bmsg) != MSG_BUNDLER))
+ if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
return false;
- if (unlikely(skb_tailroom(bskb) < (pad + msz)))
+ if (unlikely(skb_tailroom(skb) < (pad + msz)))
return false;
if (unlikely(max < (start + msz)))
return false;
+ if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
+ (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
+ return false;
- skb_put(bskb, pad + msz);
- skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
+ skb_put(skb, pad + msz);
+ skb_copy_to_linear_data_offset(skb, start, msg, msz);
msg_set_size(bmsg, start + msz);
msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
- kfree_skb(skb);
return true;
}
@@ -416,18 +417,18 @@ none:
/**
* tipc_msg_make_bundle(): Create bundle buf and append message to its tail
- * @list: the buffer chain
- * @skb: buffer to be appended and replaced
+ * @skb: buffer to be created, appended to and returned in case of success
+ * @msg: message to be appended
* @mtu: max allowable size for the bundle buffer, inclusive header
* @dnode: destination node for message. (Not always present in header)
- * Replaces buffer if successful
* Returns true if success, otherwise false
*/
-bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
+bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
+ u32 mtu, u32 dnode)
{
- struct sk_buff *bskb;
+ struct sk_buff *_skb;
struct tipc_msg *bmsg;
- struct tipc_msg *msg = buf_msg(*skb);
u32 msz = msg_size(msg);
u32 max = mtu - INT_H_SIZE;
@@ -440,19 +441,23 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
if (msz > (max / 2))
return false;
- bskb = tipc_buf_acquire(max);
- if (!bskb)
+ _skb = tipc_buf_acquire(max);
+ if (!_skb)
return false;
- skb_trim(bskb, INT_H_SIZE);
- bmsg = buf_msg(bskb);
+ skb_trim(_skb, INT_H_SIZE);
+ bmsg = buf_msg(_skb);
tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
INT_H_SIZE, dnode);
+ if (msg_isdata(msg))
+ msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
+ else
+ msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
msg_set_seqno(bmsg, msg_seqno(msg));
msg_set_ack(bmsg, msg_ack(msg));
msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
- tipc_msg_bundle(bskb, *skb, mtu);
- *skb = bskb;
+ tipc_msg_bundle(_skb, msg, mtu);
+ *skb = _skb;
return true;
}
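
Bundled messages are appended at 4-byte boundaries: the append offset is the bundle's current size rounded up by align(), and the difference is padding. A small sketch of that computation, assuming align() rounds up to the next multiple of four as its use above suggests:

    #include <stdio.h>

    /* Round up to the next 4-byte boundary. */
    static unsigned int align4(unsigned int i)
    {
        return (i + 3) & ~3u;
    }

    int main(void)
    {
        unsigned int bsz = 46;            /* current bundle size */
        unsigned int msz = 100;           /* message to be appended */
        unsigned int start = align4(bsz); /* append offset: 48 */
        unsigned int pad = start - bsz;   /* filler bytes: 2 */

        printf("start=%u pad=%u new_size=%u\n", start, pad, start + msz);
        return 0;
    }
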
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index e1d3595e2ee9..19c45fb66238 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -313,12 +313,12 @@ static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n)
msg_set_bits(m, 1, 19, 0x3, n);
}
-static inline u32 msg_bcast_ack(struct tipc_msg *m)
+static inline u16 msg_bcast_ack(struct tipc_msg *m)
{
return msg_bits(m, 1, 0, 0xffff);
}
-static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
+static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n)
{
msg_set_bits(m, 1, 0, 0xffff, n);
}
@@ -327,22 +327,22 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
/*
* Word 2
*/
-static inline u32 msg_ack(struct tipc_msg *m)
+static inline u16 msg_ack(struct tipc_msg *m)
{
return msg_bits(m, 2, 16, 0xffff);
}
-static inline void msg_set_ack(struct tipc_msg *m, u32 n)
+static inline void msg_set_ack(struct tipc_msg *m, u16 n)
{
msg_set_bits(m, 2, 16, 0xffff, n);
}
-static inline u32 msg_seqno(struct tipc_msg *m)
+static inline u16 msg_seqno(struct tipc_msg *m)
{
return msg_bits(m, 2, 0, 0xffff);
}
-static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
+static inline void msg_set_seqno(struct tipc_msg *m, u16 n)
{
msg_set_bits(m, 2, 0, 0xffff, n);
}
@@ -352,18 +352,22 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
*/
static inline u32 msg_importance(struct tipc_msg *m)
{
- if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+ int usr = msg_user(m);
+
+ if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m)))
+ return usr;
+ if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))
return msg_bits(m, 5, 13, 0x7);
- if (likely(msg_isdata(m) && !msg_errcode(m)))
- return msg_user(m);
return TIPC_SYSTEM_IMPORTANCE;
}
static inline void msg_set_importance(struct tipc_msg *m, u32 i)
{
- if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+ int usr = msg_user(m);
+
+ if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)))
msg_set_bits(m, 5, 13, 0x7, i);
- else if (likely(i < TIPC_SYSTEM_IMPORTANCE))
+ else if (i < TIPC_SYSTEM_IMPORTANCE)
msg_set_user(m, i);
else
pr_warn("Trying to set illegal importance in message\n");
@@ -772,9 +776,9 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
uint data_sz, u32 dnode, u32 onode,
u32 dport, u32 oport, int errcode);
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu);
-
-bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode);
+bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu);
+bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
+ u32 mtu, u32 dnode);
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int offset, int dsz, int mtu, struct sk_buff_head *list);
@@ -782,6 +786,11 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
int *err);
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
+static inline u16 buf_seqno(struct sk_buff *skb)
+{
+ return msg_seqno(buf_msg(skb));
+}
+
/* tipc_skb_peek(): peek and reserve first buffer in list
* @list: list to be peeked in
* Returns pointer to first buffer in list, if any
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index ab0ac62a1287..0f47f08bf38f 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -330,13 +330,9 @@ static struct publication *tipc_nameseq_insert_publ(struct net *net,
/* Any subscriptions waiting for notification? */
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
- tipc_subscr_report_overlap(s,
- publ->lower,
- publ->upper,
- TIPC_PUBLISHED,
- publ->ref,
- publ->node,
- created_subseq);
+ tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
+ TIPC_PUBLISHED, publ->ref,
+ publ->node, created_subseq);
}
return publ;
}
@@ -404,13 +400,9 @@ found:
/* Notify any waiting subscriptions */
list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
- tipc_subscr_report_overlap(s,
- publ->lower,
- publ->upper,
- TIPC_WITHDRAWN,
- publ->ref,
- publ->node,
- removed_subseq);
+ tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
+ TIPC_WITHDRAWN, publ->ref,
+ publ->node, removed_subseq);
}
return publ;
@@ -432,19 +424,17 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
return;
while (sseq != &nseq->sseqs[nseq->first_free]) {
- if (tipc_subscr_overlap(s, sseq->lower, sseq->upper)) {
+ if (tipc_subscrp_check_overlap(s, sseq->lower, sseq->upper)) {
struct publication *crs;
struct name_info *info = sseq->info;
int must_report = 1;
list_for_each_entry(crs, &info->zone_list, zone_list) {
- tipc_subscr_report_overlap(s,
- sseq->lower,
- sseq->upper,
- TIPC_PUBLISHED,
- crs->ref,
- crs->node,
- must_report);
+ tipc_subscrp_report_overlap(s, sseq->lower,
+ sseq->upper,
+ TIPC_PUBLISHED,
+ crs->ref, crs->node,
+ must_report);
must_report = 0;
}
}
diff --git a/net/tipc/net.c b/net/tipc/net.c
index a54f3cbe2246..d6d1399ae229 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -40,6 +40,7 @@
#include "subscr.h"
#include "socket.h"
#include "node.h"
+#include "bcast.h"
static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
[TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index ce9121e8e990..53e0fee80086 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
int rep_type;
int rep_size;
int req_type;
+ struct net *net;
struct sk_buff *rep;
struct tlv_desc *req;
struct sock *dst_sk;
@@ -68,7 +69,8 @@ struct tipc_nl_compat_cmd_dump {
struct tipc_nl_compat_cmd_doit {
int (*doit)(struct sk_buff *skb, struct genl_info *info);
- int (*transcode)(struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
+ int (*transcode)(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
};
static int tipc_skb_tailroom(struct sk_buff *skb)
@@ -281,7 +283,7 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
if (!trans_buf)
return -ENOMEM;
- err = (*cmd->transcode)(trans_buf, msg);
+ err = (*cmd->transcode)(cmd, trans_buf, msg);
if (err)
goto trans_out;
@@ -353,7 +355,8 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
nla_len(bearer[TIPC_NLA_BEARER_NAME]));
}
-static int tipc_nl_compat_bearer_enable(struct sk_buff *skb,
+static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
struct nlattr *prop;
@@ -385,7 +388,8 @@ static int tipc_nl_compat_bearer_enable(struct sk_buff *skb,
return 0;
}
-static int tipc_nl_compat_bearer_disable(struct sk_buff *skb,
+static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
char *name;
@@ -576,11 +580,81 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
&link_info, sizeof(link_info));
}
-static int tipc_nl_compat_link_set(struct sk_buff *skb,
- struct tipc_nl_compat_msg *msg)
+static int __tipc_add_link_prop(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg,
+ struct tipc_link_config *lc)
+{
+ switch (msg->cmd) {
+ case TIPC_CMD_SET_LINK_PRI:
+ return nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value));
+ case TIPC_CMD_SET_LINK_TOL:
+ return nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value));
+ case TIPC_CMD_SET_LINK_WINDOW:
+ return nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value));
+ }
+
+ return -EINVAL;
+}
+
+static int tipc_nl_compat_media_set(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
{
- struct nlattr *link;
struct nlattr *prop;
+ struct nlattr *media;
+ struct tipc_link_config *lc;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+ media = nla_nest_start(skb, TIPC_NLA_MEDIA);
+ if (!media)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
+ return -EMSGSIZE;
+
+ prop = nla_nest_start(skb, TIPC_NLA_MEDIA_PROP);
+ if (!prop)
+ return -EMSGSIZE;
+
+ __tipc_add_link_prop(skb, msg, lc);
+ nla_nest_end(skb, prop);
+ nla_nest_end(skb, media);
+
+ return 0;
+}
+
+static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ struct nlattr *prop;
+ struct nlattr *bearer;
+ struct tipc_link_config *lc;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+ bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
+ if (!bearer)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
+ return -EMSGSIZE;
+
+ prop = nla_nest_start(skb, TIPC_NLA_BEARER_PROP);
+ if (!prop)
+ return -EMSGSIZE;
+
+ __tipc_add_link_prop(skb, msg, lc);
+ nla_nest_end(skb, prop);
+ nla_nest_end(skb, bearer);
+
+ return 0;
+}
+
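
Both tipc_nl_compat_media_set() and tipc_nl_compat_bearer_set() build the same message shape: an outer nested attribute, a name string, then a nested PROP attribute carrying one u32, with nla_nest_start()/nla_nest_end() backpatching the nest length once the payload is in place. A toy illustration of that nest-and-backpatch idea (the TLV layout below is simplified; real netlink attributes have 4-byte alignment rules not reproduced here):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy TLV: 2-byte length (covering the whole attr), 2-byte type. */
    static size_t nest_start(uint8_t *buf, size_t off, uint16_t type)
    {
        memcpy(buf + off + 2, &type, 2);
        return off;                    /* remember where the length goes */
    }

    static void nest_end(uint8_t *buf, size_t nest_off, size_t off)
    {
        uint16_t len = off - nest_off; /* backpatch total nest length */
        memcpy(buf + nest_off, &len, 2);
    }

    static size_t put_u32(uint8_t *buf, size_t off, uint16_t type, uint32_t v)
    {
        uint16_t len = 8;
        memcpy(buf + off, &len, 2);
        memcpy(buf + off + 2, &type, 2);
        memcpy(buf + off + 4, &v, 4);
        return off + 8;
    }

    int main(void)
    {
        uint8_t buf[64];
        size_t off = 0;
        size_t nest = nest_start(buf, off, 1 /* e.g. a PROP nest */);

        off += 4;                      /* skip the nest header */
        off = put_u32(buf, off, 3 /* e.g. a WIN prop */, 50);
        nest_end(buf, nest, off);
        printf("nested attr is %zu bytes\n", off);
        return 0;
    }
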
+static int __tipc_nl_compat_link_set(struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ struct nlattr *prop;
+ struct nlattr *link;
struct tipc_link_config *lc;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
@@ -596,24 +670,40 @@ static int tipc_nl_compat_link_set(struct sk_buff *skb,
if (!prop)
return -EMSGSIZE;
- if (msg->cmd == TIPC_CMD_SET_LINK_PRI) {
- if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value)))
- return -EMSGSIZE;
- } else if (msg->cmd == TIPC_CMD_SET_LINK_TOL) {
- if (nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value)))
- return -EMSGSIZE;
- } else if (msg->cmd == TIPC_CMD_SET_LINK_WINDOW) {
- if (nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value)))
- return -EMSGSIZE;
- }
-
+ __tipc_add_link_prop(skb, msg, lc);
nla_nest_end(skb, prop);
nla_nest_end(skb, link);
return 0;
}
-static int tipc_nl_compat_link_reset_stats(struct sk_buff *skb,
+static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb,
+ struct tipc_nl_compat_msg *msg)
+{
+ struct tipc_link_config *lc;
+ struct tipc_bearer *bearer;
+ struct tipc_media *media;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+ media = tipc_media_find(lc->name);
+ if (media) {
+ cmd->doit = &tipc_nl_media_set;
+ return tipc_nl_compat_media_set(skb, msg);
+ }
+
+ bearer = tipc_bearer_find(msg->net, lc->name);
+ if (bearer) {
+ cmd->doit = &tipc_nl_bearer_set;
+ return tipc_nl_compat_bearer_set(skb, msg);
+ }
+
+ return __tipc_nl_compat_link_set(skb, msg);
+}
+
+static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
char *name;
@@ -851,7 +941,8 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
sizeof(node_info));
}
-static int tipc_nl_compat_net_set(struct sk_buff *skb,
+static int tipc_nl_compat_net_set(struct tipc_nl_compat_cmd_doit *cmd,
+ struct sk_buff *skb,
struct tipc_nl_compat_msg *msg)
{
u32 val;
@@ -1007,7 +1098,6 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
struct nlmsghdr *req_nlh;
struct nlmsghdr *rep_nlh;
struct tipc_genlmsghdr *req_userhdr = info->userhdr;
- struct net *net = genl_info_net(info);
memset(&msg, 0, sizeof(msg));
@@ -1015,6 +1105,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
msg.req = nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN;
msg.cmd = req_userhdr->cmd;
msg.dst_sk = info->dst_sk;
+ msg.net = genl_info_net(info);
if ((msg.cmd & 0xC000) && (!netlink_net_capable(skb, CAP_NET_ADMIN))) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_NET_ADMIN);
@@ -1030,7 +1121,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
}
err = tipc_nl_compat_handle(&msg);
- if (err == -EOPNOTSUPP)
+ if ((err == -EOPNOTSUPP) || (err == -EPERM))
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
else if (err == -EINVAL)
msg.rep = tipc_get_err_tlv(TIPC_CFG_TLV_ERROR);
@@ -1043,7 +1134,7 @@ send:
rep_nlh = nlmsg_hdr(msg.rep);
memcpy(rep_nlh, info->nlhdr, len);
rep_nlh->nlmsg_len = msg.rep->len;
- genlmsg_unicast(net, msg.rep, NETLINK_CB(skb).portid);
+ genlmsg_unicast(msg.net, msg.rep, NETLINK_CB(skb).portid);
return err;
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 22c059ad2999..0b1d61a5f853 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1,7 +1,7 @@
/*
* net/tipc/node.c: TIPC node management routines
*
- * Copyright (c) 2000-2006, 2012-2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2012-2015, Ericsson AB
* Copyright (c) 2005-2006, 2010-2014, Wind River Systems
* All rights reserved.
*
@@ -39,6 +39,7 @@
#include "node.h"
#include "name_distr.h"
#include "socket.h"
+#include "bcast.h"
static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 02d5c20dc551..5a834cf142c8 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -45,8 +45,6 @@
/* Out-of-range value for node signature */
#define INVALID_NODE_SIG 0x10000
-#define NODE_HTABLE_SIZE 512
-
/* Flags used to take different actions according to flag type
* TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
* TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 77ff03ed1e18..922e04a43396 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -309,6 +309,10 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
/* Notify that new connection is incoming */
newcon->usr_data = s->tipc_conn_new(newcon->conid);
+ if (!newcon->usr_data) {
+ sock_release(newsock);
+ return -ENOMEM;
+ }
/* Wake up receive process in case of 'SYN+' message */
newsock->sk->sk_data_ready(newsock->sk);
@@ -321,7 +325,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
struct socket *sock = NULL;
int ret;
- ret = __sock_create(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock, 1);
+ ret = sock_create_kern(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock);
if (ret < 0)
return NULL;
ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f485600c4507..46b6ed534ef2 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -41,6 +41,7 @@
#include "link.h"
#include "name_distr.h"
#include "socket.h"
+#include "bcast.h"
#define SS_LISTENING -1 /* socket is listening */
#define SS_READY -2 /* socket is connectionless */
@@ -342,7 +343,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
}
/* Allocate socket's protocol area */
- sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
+ sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
if (sk == NULL)
return -ENOMEM;
@@ -409,7 +410,7 @@ static int tipc_release(struct socket *sock)
struct net *net;
struct tipc_sock *tsk;
struct sk_buff *skb;
- u32 dnode, probing_state;
+ u32 dnode;
/*
* Exit if socket isn't fully initialized (occurs when a failed accept()
@@ -447,10 +448,7 @@ static int tipc_release(struct socket *sock)
}
tipc_sk_withdraw(tsk, 0, NULL);
- probing_state = tsk->probing_state;
- if (del_timer_sync(&sk->sk_timer) &&
- probing_state != TIPC_CONN_PROBING)
- sock_put(sk);
+ sk_stop_timer(sk, &sk->sk_timer);
tipc_sk_remove(tsk);
if (tsk->connected) {
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 1c147c869c2e..350cca33ee0a 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -40,16 +40,21 @@
/**
* struct tipc_subscriber - TIPC network topology subscriber
+ * @kref: reference counter to tipc_subscription object
* @conid: connection identifier to server connecting to subscriber
* @lock: control access to subscriber
- * @subscription_list: list of subscription objects for this subscriber
+ * @subscrp_list: list of subscription objects for this subscriber
*/
struct tipc_subscriber {
+ struct kref kref;
int conid;
spinlock_t lock;
- struct list_head subscription_list;
+ struct list_head subscrp_list;
};
+static void tipc_subscrp_delete(struct tipc_subscription *sub);
+static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+
/**
* htohl - convert value to endianness used by destination
* @in: value to convert
@@ -62,9 +67,9 @@ static u32 htohl(u32 in, int swap)
return swap ? swab32(in) : in;
}
-static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
- u32 found_upper, u32 event, u32 port_ref,
- u32 node)
+static void tipc_subscrp_send_event(struct tipc_subscription *sub,
+ u32 found_lower, u32 found_upper,
+ u32 event, u32 port_ref, u32 node)
{
struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
struct tipc_subscriber *subscriber = sub->subscriber;
@@ -82,12 +87,13 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
}
/**
- * tipc_subscr_overlap - test for subscription overlap with the given values
+ * tipc_subscrp_check_overlap - test for subscription overlap with the
+ * given values
*
* Returns 1 if there is overlap, otherwise 0.
*/
-int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
- u32 found_upper)
+int tipc_subscrp_check_overlap(struct tipc_subscription *sub, u32 found_lower,
+ u32 found_upper)
{
if (found_lower < sub->seq.lower)
found_lower = sub->seq.lower;
@@ -98,138 +104,121 @@ int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
return 1;
}
-/**
- * tipc_subscr_report_overlap - issue event if there is subscription overlap
- *
- * Protected by nameseq.lock in name_table.c
- */
-void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
- u32 found_upper, u32 event, u32 port_ref,
- u32 node, int must)
+void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
+ u32 found_upper, u32 event, u32 port_ref,
+ u32 node, int must)
{
- if (!tipc_subscr_overlap(sub, found_lower, found_upper))
+ if (!tipc_subscrp_check_overlap(sub, found_lower, found_upper))
return;
if (!must && !(sub->filter & TIPC_SUB_PORTS))
return;
- subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
+ tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
+ node);
}
-static void subscr_timeout(unsigned long data)
+static void tipc_subscrp_timeout(unsigned long data)
{
struct tipc_subscription *sub = (struct tipc_subscription *)data;
struct tipc_subscriber *subscriber = sub->subscriber;
- struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
- /* The spin lock per subscriber is used to protect its members */
- spin_lock_bh(&subscriber->lock);
+ /* Notify subscriber of timeout */
+ tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
+ TIPC_SUBSCR_TIMEOUT, 0, 0);
- /* Validate timeout (in case subscription is being cancelled) */
- if (sub->timeout == TIPC_WAIT_FOREVER) {
- spin_unlock_bh(&subscriber->lock);
- return;
- }
+ spin_lock_bh(&subscriber->lock);
+ tipc_subscrp_delete(sub);
+ spin_unlock_bh(&subscriber->lock);
- /* Unlink subscription from name table */
- tipc_nametbl_unsubscribe(sub);
+ tipc_subscrb_put(subscriber);
+}
- /* Unlink subscription from subscriber */
- list_del(&sub->subscription_list);
+static void tipc_subscrb_kref_release(struct kref *kref)
+{
+ struct tipc_subscriber *subscriber = container_of(kref,
+ struct tipc_subscriber, kref);
- spin_unlock_bh(&subscriber->lock);
+ kfree(subscriber);
+}
- /* Notify subscriber of timeout */
- subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
- TIPC_SUBSCR_TIMEOUT, 0, 0);
+static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
+{
+ kref_put(&subscriber->kref, tipc_subscrb_kref_release);
+}
- /* Now destroy subscription */
- kfree(sub);
- atomic_dec(&tn->subscription_count);
+static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
+{
+ kref_get(&subscriber->kref);
}
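
The subscriber object is now reference-counted: kref_init() starts the count at one, tipc_subscrb_get() takes an extra reference for each long-lived user such as a pending timer, and tipc_subscrb_put() frees the object through the release callback only on the final drop. A userspace imitation of that lifetime rule (a plain int counter instead of a real kref, single-threaded):

    #include <stdio.h>
    #include <stdlib.h>

    struct subscriber {
        int refcount;
        int conid;
    };

    static void sub_get(struct subscriber *s)
    {
        s->refcount++;
    }

    /* Free the object only when the last reference is dropped. */
    static void sub_put(struct subscriber *s)
    {
        if (--s->refcount == 0) {
            printf("releasing conid %d\n", s->conid);
            free(s);
        }
    }

    int main(void)
    {
        struct subscriber *s = calloc(1, sizeof(*s));

        s->refcount = 1;  /* kref_init() equivalent */
        s->conid = 42;
        sub_get(s);       /* e.g. a pending timer holds a reference */
        sub_put(s);       /* timer fired: object still alive */
        sub_put(s);       /* final put: released */
        return 0;
    }
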
-/**
- * subscr_del - delete a subscription within a subscription list
- *
- * Called with subscriber lock held.
- */
-static void subscr_del(struct tipc_subscription *sub)
+static struct tipc_subscriber *tipc_subscrb_create(int conid)
{
- struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+ struct tipc_subscriber *subscriber;
- tipc_nametbl_unsubscribe(sub);
- list_del(&sub->subscription_list);
- kfree(sub);
- atomic_dec(&tn->subscription_count);
+ subscriber = kzalloc(sizeof(*subscriber), GFP_ATOMIC);
+ if (!subscriber) {
+ pr_warn("Subscriber rejected, no memory\n");
+ return NULL;
+ }
+ kref_init(&subscriber->kref);
+ INIT_LIST_HEAD(&subscriber->subscrp_list);
+ subscriber->conid = conid;
+ spin_lock_init(&subscriber->lock);
+
+ return subscriber;
}
-static void subscr_release(struct tipc_subscriber *subscriber)
+static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
{
- struct tipc_subscription *sub;
- struct tipc_subscription *sub_temp;
+ struct tipc_subscription *sub, *temp;
spin_lock_bh(&subscriber->lock);
-
/* Destroy any existing subscriptions for subscriber */
- list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
- subscription_list) {
- if (sub->timeout != TIPC_WAIT_FOREVER) {
- spin_unlock_bh(&subscriber->lock);
- del_timer_sync(&sub->timer);
- spin_lock_bh(&subscriber->lock);
+ list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
+ subscrp_list) {
+ if (del_timer(&sub->timer)) {
+ tipc_subscrp_delete(sub);
+ tipc_subscrb_put(subscriber);
}
- subscr_del(sub);
}
spin_unlock_bh(&subscriber->lock);
- /* Now destroy subscriber */
- kfree(subscriber);
+ tipc_subscrb_put(subscriber);
}
-/**
- * subscr_cancel - handle subscription cancellation request
- *
- * Called with subscriber lock held. Routine must temporarily release lock
- * to enable the subscription timeout routine to finish without deadlocking;
- * the lock is then reclaimed to allow caller to release it upon return.
- *
- * Note that fields of 's' use subscriber's endianness!
- */
-static void subscr_cancel(struct tipc_subscr *s,
- struct tipc_subscriber *subscriber)
+static void tipc_subscrp_delete(struct tipc_subscription *sub)
{
- struct tipc_subscription *sub;
- struct tipc_subscription *sub_temp;
- int found = 0;
+ struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+
+ tipc_nametbl_unsubscribe(sub);
+ list_del(&sub->subscrp_list);
+ kfree(sub);
+ atomic_dec(&tn->subscription_count);
+}
+static void tipc_subscrp_cancel(struct tipc_subscr *s,
+ struct tipc_subscriber *subscriber)
+{
+ struct tipc_subscription *sub, *temp;
+
+ spin_lock_bh(&subscriber->lock);
/* Find first matching subscription, exit if not found */
- list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
- subscription_list) {
+ list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
+ subscrp_list) {
if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
- found = 1;
+ if (del_timer(&sub->timer)) {
+ tipc_subscrp_delete(sub);
+ tipc_subscrb_put(subscriber);
+ }
break;
}
}
- if (!found)
- return;
-
- /* Cancel subscription timer (if used), then delete subscription */
- if (sub->timeout != TIPC_WAIT_FOREVER) {
- sub->timeout = TIPC_WAIT_FOREVER;
- spin_unlock_bh(&subscriber->lock);
- del_timer_sync(&sub->timer);
- spin_lock_bh(&subscriber->lock);
- }
- subscr_del(sub);
+ spin_unlock_bh(&subscriber->lock);
}
-/**
- * subscr_subscribe - create subscription for subscriber
- *
- * Called with subscriber lock held.
- */
-static int subscr_subscribe(struct net *net, struct tipc_subscr *s,
- struct tipc_subscriber *subscriber,
- struct tipc_subscription **sub_p)
+static int tipc_subscrp_create(struct net *net, struct tipc_subscr *s,
+ struct tipc_subscriber *subscriber,
+ struct tipc_subscription **sub_p)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_subscription *sub;
@@ -241,7 +230,7 @@ static int subscr_subscribe(struct net *net, struct tipc_subscr *s,
/* Detect & process a subscription cancellation request */
if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
- subscr_cancel(s, subscriber);
+ tipc_subscrp_cancel(s, subscriber);
return 0;
}
@@ -273,62 +262,51 @@ static int subscr_subscribe(struct net *net, struct tipc_subscr *s,
kfree(sub);
return -EINVAL;
}
- list_add(&sub->subscription_list, &subscriber->subscription_list);
+ spin_lock_bh(&subscriber->lock);
+ list_add(&sub->subscrp_list, &subscriber->subscrp_list);
+ spin_unlock_bh(&subscriber->lock);
sub->subscriber = subscriber;
sub->swap = swap;
- memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
+ memcpy(&sub->evt.s, s, sizeof(*s));
atomic_inc(&tn->subscription_count);
- if (sub->timeout != TIPC_WAIT_FOREVER) {
- setup_timer(&sub->timer, subscr_timeout, (unsigned long)sub);
- mod_timer(&sub->timer, jiffies + sub->timeout);
- }
+ setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
+ if (sub->timeout != TIPC_WAIT_FOREVER)
+ sub->timeout += jiffies;
+ if (!mod_timer(&sub->timer, sub->timeout))
+ tipc_subscrb_get(subscriber);
*sub_p = sub;
return 0;
}
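
The timer/reference pairing above relies on the kernel timer API's return values: mod_timer() returns 0 when the timer was previously inactive, so the create path takes a subscriber reference exactly once per armed timer, and the cancel and delete paths drop that reference only when del_timer() confirms they deactivated a pending timer. A sketch of that accounting invariant (single-threaded stand-ins for the timer calls):

    #include <stdbool.h>
    #include <stdio.h>

    static int refs;
    static bool timer_pending;

    /* Returns 0 if the timer was inactive (mod_timer() convention). */
    static int mod_timer_stub(void)
    {
        int was_pending = timer_pending;

        timer_pending = true;
        return was_pending;
    }

    /* Returns 1 only if a pending timer was deactivated (del_timer()). */
    static int del_timer_stub(void)
    {
        int was_pending = timer_pending;

        timer_pending = false;
        return was_pending;
    }

    int main(void)
    {
        if (!mod_timer_stub())
            refs++;        /* the armed timer now holds a reference */
        if (del_timer_stub())
            refs--;        /* we cancelled it: drop that reference */
        printf("refs=%d (balanced)\n", refs);
        return 0;
    }
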
/* Handle one termination request for the subscriber */
-static void subscr_conn_shutdown_event(int conid, void *usr_data)
+static void tipc_subscrb_shutdown_cb(int conid, void *usr_data)
{
- subscr_release((struct tipc_subscriber *)usr_data);
+ tipc_subscrb_delete((struct tipc_subscriber *)usr_data);
}
/* Handle one request to create a new subscription for the subscriber */
-static void subscr_conn_msg_event(struct net *net, int conid,
- struct sockaddr_tipc *addr, void *usr_data,
- void *buf, size_t len)
+static void tipc_subscrb_rcv_cb(struct net *net, int conid,
+ struct sockaddr_tipc *addr, void *usr_data,
+ void *buf, size_t len)
{
struct tipc_subscriber *subscriber = usr_data;
struct tipc_subscription *sub = NULL;
struct tipc_net *tn = net_generic(net, tipc_net_id);
- spin_lock_bh(&subscriber->lock);
- subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub);
+ tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
if (sub)
tipc_nametbl_subscribe(sub);
else
tipc_conn_terminate(tn->topsrv, subscriber->conid);
- spin_unlock_bh(&subscriber->lock);
}
/* Handle one request to establish a new subscriber */
-static void *subscr_named_msg_event(int conid)
+static void *tipc_subscrb_connect_cb(int conid)
{
- struct tipc_subscriber *subscriber;
-
- /* Create subscriber object */
- subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
- if (subscriber == NULL) {
- pr_warn("Subscriber rejected, no memory\n");
- return NULL;
- }
- INIT_LIST_HEAD(&subscriber->subscription_list);
- subscriber->conid = conid;
- spin_lock_init(&subscriber->lock);
-
- return (void *)subscriber;
+ return (void *)tipc_subscrb_create(conid);
}
-int tipc_subscr_start(struct net *net)
+int tipc_topsrv_start(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
const char name[] = "topology_server";
@@ -355,9 +333,9 @@ int tipc_subscr_start(struct net *net)
topsrv->imp = TIPC_CRITICAL_IMPORTANCE;
topsrv->type = SOCK_SEQPACKET;
topsrv->max_rcvbuf_size = sizeof(struct tipc_subscr);
- topsrv->tipc_conn_recvmsg = subscr_conn_msg_event;
- topsrv->tipc_conn_new = subscr_named_msg_event;
- topsrv->tipc_conn_shutdown = subscr_conn_shutdown_event;
+ topsrv->tipc_conn_recvmsg = tipc_subscrb_rcv_cb;
+ topsrv->tipc_conn_new = tipc_subscrb_connect_cb;
+ topsrv->tipc_conn_shutdown = tipc_subscrb_shutdown_cb;
strncpy(topsrv->name, name, strlen(name) + 1);
tn->topsrv = topsrv;
@@ -366,7 +344,7 @@ int tipc_subscr_start(struct net *net)
return tipc_server_start(topsrv);
}
-void tipc_subscr_stop(struct net *net)
+void tipc_topsrv_stop(struct net *net)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_server *topsrv = tn->topsrv;
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 33488bd9fe3c..92ee18cc5fe6 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -54,7 +54,7 @@ struct tipc_subscriber;
* @filter: event filtering to be done for subscription
* @timer: timer governing subscription duration (optional)
* @nameseq_list: adjacent subscriptions in name sequence's subscription list
- * @subscription_list: adjacent subscriptions in subscriber's subscription list
+ * @subscrp_list: adjacent subscriptions in subscriber's subscription list
* @server_ref: object reference of server port associated with subscription
* @swap: indicates if subscriber uses opposite endianness in its messages
* @evt: template for events generated by subscription
@@ -67,17 +67,17 @@ struct tipc_subscription {
u32 filter;
struct timer_list timer;
struct list_head nameseq_list;
- struct list_head subscription_list;
+ struct list_head subscrp_list;
int swap;
struct tipc_event evt;
};
-int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
- u32 found_upper);
-void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
- u32 found_upper, u32 event, u32 port_ref,
- u32 node, int must);
-int tipc_subscr_start(struct net *net);
-void tipc_subscr_stop(struct net *net);
+int tipc_subscrp_check_overlap(struct tipc_subscription *sub, u32 found_lower,
+ u32 found_upper);
+void tipc_subscrp_report_overlap(struct tipc_subscription *sub,
+ u32 found_lower, u32 found_upper, u32 event,
+ u32 port_ref, u32 node, int must);
+int tipc_topsrv_start(struct net *net);
+void tipc_topsrv_stop(struct net *net);
#endif
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 06430598cf51..03ee4d359f6a 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -140,12 +140,17 @@ static struct hlist_head *unix_sockets_unbound(void *addr)
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
- memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
+ UNIXCB(skb).secid = scm->secid;
}
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
- scm->secid = *UNIXSID(skb);
+ scm->secid = UNIXCB(skb).secid;
+}
+
+static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
@@ -153,6 +158,11 @@ static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
+
+static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ return true;
+}
#endif /* CONFIG_SECURITY_NETWORK */
/*
@@ -518,6 +528,11 @@ static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
+static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
+ size_t size, int flags);
+static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
+ struct pipe_inode_info *, size_t size,
+ unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
@@ -558,7 +573,8 @@ static const struct proto_ops unix_stream_ops = {
.sendmsg = unix_stream_sendmsg,
.recvmsg = unix_stream_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage,
+ .sendpage = unix_stream_sendpage,
+ .splice_read = unix_stream_splice_read,
.set_peek_off = unix_set_peek_off,
};
@@ -620,7 +636,7 @@ static struct proto unix_proto = {
*/
static struct lock_class_key af_unix_sk_receive_queue_lock_key;
-static struct sock *unix_create1(struct net *net, struct socket *sock)
+static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
{
struct sock *sk = NULL;
struct unix_sock *u;
@@ -629,7 +645,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
goto out;
- sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
+ sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
if (!sk)
goto out;
@@ -688,7 +704,7 @@ static int unix_create(struct net *net, struct socket *sock, int protocol,
return -ESOCKTNOSUPPORT;
}
- return unix_create1(net, sock) ? 0 : -ENOMEM;
+ return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
}
static int unix_release(struct socket *sock)
@@ -1088,7 +1104,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
err = -ENOMEM;
/* create new sock for complete connection */
- newsk = unix_create1(sock_net(sk), NULL);
+ newsk = unix_create1(sock_net(sk), NULL, 0);
if (newsk == NULL)
goto out;
@@ -1408,6 +1424,7 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
UNIXCB(skb).uid = scm->creds.uid;
UNIXCB(skb).gid = scm->creds.gid;
UNIXCB(skb).fp = NULL;
+ unix_get_secdata(scm, skb);
if (scm->fp && send_fds)
err = unix_attach_fds(scm, skb);
@@ -1503,7 +1520,6 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
if (err < 0)
goto out_free;
max_level = err + 1;
- unix_get_secdata(&scm, skb);
skb_put(skb, len - data_len);
skb->data_len = data_len;
@@ -1720,6 +1736,101 @@ out_err:
return sent ? : err;
}
+static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
+ int offset, size_t size, int flags)
+{
+ int err = 0;
+ bool send_sigpipe = true;
+ struct sock *other, *sk = socket->sk;
+ struct sk_buff *skb, *newskb = NULL, *tail = NULL;
+
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+ other = unix_peer(sk);
+ if (!other || sk->sk_state != TCP_ESTABLISHED)
+ return -ENOTCONN;
+
+ if (false) {
+alloc_skb:
+ unix_state_unlock(other);
+ mutex_unlock(&unix_sk(other)->readlock);
+ newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
+ &err, 0);
+ if (!newskb)
+ return err;
+ }
+
+ /* we must acquire readlock as we modify already present
+ * skbs in the sk_receive_queue and mess with skb->len
+ */
+ err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+ if (err) {
+ err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
+ send_sigpipe = false;
+ goto err;
+ }
+
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
+ err = -EPIPE;
+ goto err_unlock;
+ }
+
+ unix_state_lock(other);
+
+ if (sock_flag(other, SOCK_DEAD) ||
+ other->sk_shutdown & RCV_SHUTDOWN) {
+ err = -EPIPE;
+ goto err_state_unlock;
+ }
+
+ skb = skb_peek_tail(&other->sk_receive_queue);
+ if (tail && tail == skb) {
+ skb = newskb;
+ } else if (!skb) {
+ if (newskb)
+ skb = newskb;
+ else
+ goto alloc_skb;
+ } else if (newskb) {
+ /* this is the fast path; consume_skb() is NULL-safe,
+ * so calling it here does no harm even when newskb
+ * is NULL
+ */
+ consume_skb(newskb);
+ }
+
+ if (skb_append_pagefrags(skb, page, offset, size)) {
+ tail = skb;
+ goto alloc_skb;
+ }
+
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += size;
+ atomic_add(size, &sk->sk_wmem_alloc);
+
+ if (newskb)
+ __skb_queue_tail(&other->sk_receive_queue, newskb);
+
+ unix_state_unlock(other);
+ mutex_unlock(&unix_sk(other)->readlock);
+
+ other->sk_data_ready(other);
+
+ return size;
+
+err_state_unlock:
+ unix_state_unlock(other);
+err_unlock:
+ mutex_unlock(&unix_sk(other)->readlock);
+err:
+ kfree_skb(newskb);
+ if (send_sigpipe && !(flags & MSG_NOSIGNAL))
+ send_sig(SIGPIPE, current, 0);
+ return err;
+}
+
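
unix_stream_sendpage() above uses an unusual but valid C construct: the allocation code sits inside an if (false) block, so it is skipped on the first pass and reached only by a backward goto once the locks have been dropped. A compact illustration of the control flow (error handling trimmed to the shape of the pattern):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *buf = NULL;

        if (false) {
    alloc:      /* reached only via goto, never on the first pass */
            buf = malloc(16);
            if (!buf)
                return 1;
        }

        if (!buf) {
            /* in the kernel, locks are dropped right before this jump */
            goto alloc;
        }

        printf("buffer ready\n");
        free(buf);
        return 0;
    }
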
static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
@@ -1860,8 +1971,9 @@ out:
* Sleep until more data has arrived. But check for races..
*/
static long unix_stream_data_wait(struct sock *sk, long timeo,
- struct sk_buff *last)
+ struct sk_buff *last, unsigned int last_len)
{
+ struct sk_buff *tail;
DEFINE_WAIT(wait);
unix_state_lock(sk);
@@ -1869,7 +1981,9 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- if (skb_peek_tail(&sk->sk_receive_queue) != last ||
+ tail = skb_peek_tail(&sk->sk_receive_queue);
+ if (tail != last ||
+ (tail && tail->len != last_len) ||
sk->sk_err ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current) ||
@@ -1897,38 +2011,50 @@ static unsigned int unix_skb_len(const struct sk_buff *skb)
return skb->len - UNIXCB(skb).consumed;
}
-static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
- size_t size, int flags)
+struct unix_stream_read_state {
+ int (*recv_actor)(struct sk_buff *, int, int,
+ struct unix_stream_read_state *);
+ struct socket *socket;
+ struct msghdr *msg;
+ struct pipe_inode_info *pipe;
+ size_t size;
+ int flags;
+ unsigned int splice_flags;
+};
+
+static int unix_stream_read_generic(struct unix_stream_read_state *state)
{
struct scm_cookie scm;
+ struct socket *sock = state->socket;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
- DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
int copied = 0;
+ int flags = state->flags;
int noblock = flags & MSG_DONTWAIT;
- int check_creds = 0;
+ bool check_creds = false;
int target;
int err = 0;
long timeo;
int skip;
+ size_t size = state->size;
+ unsigned int last_len;
err = -EINVAL;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
err = -EOPNOTSUPP;
- if (flags&MSG_OOB)
+ if (flags & MSG_OOB)
goto out;
- target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
timeo = sock_rcvtimeo(sk, noblock);
+ memset(&scm, 0, sizeof(scm));
+
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
*/
-
- memset(&scm, 0, sizeof(scm));
-
err = mutex_lock_interruptible(&u->readlock);
if (unlikely(err)) {
/* recvmsg() in non blocking mode is supposed to return -EAGAIN
@@ -1948,6 +2074,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
goto unlock;
}
last = skb = skb_peek(&sk->sk_receive_queue);
+ last_len = last ? last->len : 0;
again:
if (skb == NULL) {
unix_sk(sk)->recursion_level = 0;
@@ -1970,16 +2097,17 @@ again:
break;
mutex_unlock(&u->readlock);
- timeo = unix_stream_data_wait(sk, timeo, last);
+ timeo = unix_stream_data_wait(sk, timeo, last,
+ last_len);
- if (signal_pending(current)
- || mutex_lock_interruptible(&u->readlock)) {
+ if (signal_pending(current) ||
+ mutex_lock_interruptible(&u->readlock)) {
err = sock_intr_errno(timeo);
goto out;
}
continue;
- unlock:
+unlock:
unix_state_unlock(sk);
break;
}
@@ -1988,6 +2116,7 @@ again:
while (skip >= unix_skb_len(skb)) {
skip -= unix_skb_len(skb);
last = skb;
+ last_len = skb->len;
skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (!skb)
goto again;
@@ -1999,23 +2128,27 @@ again:
/* Never glue messages from different writers */
if ((UNIXCB(skb).pid != scm.pid) ||
!uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
- !gid_eq(UNIXCB(skb).gid, scm.creds.gid))
+ !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
+ !unix_secdata_eq(&scm, skb))
break;
} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
/* Copy credentials */
scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
- check_creds = 1;
+ unix_set_secdata(&scm, skb);
+ check_creds = true;
}
/* Copy address just once */
- if (sunaddr) {
- unix_copy_addr(msg, skb->sk);
+ if (state->msg && state->msg->msg_name) {
+ DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
+ state->msg->msg_name);
+ unix_copy_addr(state->msg, skb->sk);
sunaddr = NULL;
}
chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
- if (skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
- msg, chunk)) {
+ chunk = state->recv_actor(skb, skip, chunk, state);
+ if (chunk < 0) {
if (copied == 0)
copied = -EFAULT;
break;
@@ -2053,11 +2186,85 @@ again:
} while (size);
mutex_unlock(&u->readlock);
- scm_recv(sock, msg, &scm, flags);
+ if (state->msg)
+ scm_recv(sock, state->msg, &scm, flags);
+ else
+ scm_destroy(&scm);
out:
return copied ? : err;
}
+static int unix_stream_read_actor(struct sk_buff *skb,
+ int skip, int chunk,
+ struct unix_stream_read_state *state)
+{
+ int ret;
+
+ ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
+ state->msg, chunk);
+ return ret ?: chunk;
+}
+
+static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
+{
+ struct unix_stream_read_state state = {
+ .recv_actor = unix_stream_read_actor,
+ .socket = sock,
+ .msg = msg,
+ .size = size,
+ .flags = flags
+ };
+
+ return unix_stream_read_generic(&state);
+}
+
+static ssize_t skb_unix_socket_splice(struct sock *sk,
+ struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd)
+{
+ int ret;
+ struct unix_sock *u = unix_sk(sk);
+
+ mutex_unlock(&u->readlock);
+ ret = splice_to_pipe(pipe, spd);
+ mutex_lock(&u->readlock);
+
+ return ret;
+}
+
+static int unix_stream_splice_actor(struct sk_buff *skb,
+ int skip, int chunk,
+ struct unix_stream_read_state *state)
+{
+ return skb_splice_bits(skb, state->socket->sk,
+ UNIXCB(skb).consumed + skip,
+ state->pipe, chunk, state->splice_flags,
+ skb_unix_socket_splice);
+}
+
+static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t size, unsigned int flags)
+{
+ struct unix_stream_read_state state = {
+ .recv_actor = unix_stream_splice_actor,
+ .socket = sock,
+ .pipe = pipe,
+ .size = size,
+ .splice_flags = flags,
+ };
+
+ if (unlikely(*ppos))
+ return -ESPIPE;
+
+ if (sock->file->f_flags & O_NONBLOCK ||
+ flags & SPLICE_F_NONBLOCK)
+ state.flags = MSG_DONTWAIT;
+
+ return unix_stream_read_generic(&state);
+}
+
static int unix_shutdown(struct socket *sock, int mode)
{
struct sock *sk = sock->sk;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 2ec86e652a19..df5fc6b340f1 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -581,13 +581,14 @@ struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
gfp_t priority,
- unsigned short type)
+ unsigned short type,
+ int kern)
{
struct sock *sk;
struct vsock_sock *psk;
struct vsock_sock *vsk;
- sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto);
+ sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
if (!sk)
return NULL;
@@ -1866,7 +1867,7 @@ static int vsock_create(struct net *net, struct socket *sock,
sock->state = SS_UNCONNECTED;
- return __vsock_create(net, sock, NULL, GFP_KERNEL, 0) ? 0 : -ENOMEM;
+ return __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern) ? 0 : -ENOMEM;
}
static const struct net_proto_family vsock_family_ops = {
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index c294da095461..1f63daff3965 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1022,7 +1022,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
}
pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
- sk->sk_type);
+ sk->sk_type, 0);
if (!pending) {
vmci_transport_send_reset(sk, pkt);
return -ENOMEM;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 7aaf7415dc4c..915b328b9ac5 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -698,19 +698,20 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
EXPORT_SYMBOL(cfg80211_chandef_usable);
/*
- * For GO only, check if the channel can be used under permissive conditions
- * mandated by the some regulatory bodies, i.e., the channel is marked with
- * IEEE80211_CHAN_GO_CONCURRENT and there is an additional station interface
+ * Check if the channel can be used under permissive conditions mandated by
+ * some regulatory bodies, i.e., the channel is marked with
+ * IEEE80211_CHAN_IR_CONCURRENT and there is an additional station interface
* associated to an AP on the same channel or on the same UNII band
* (assuming that the AP is an authorized master).
- * In addition allow the GO to operate on a channel on which indoor operation is
+ * In addition allow operation on a channel on which indoor operation is
* allowed, iff we are currently operating in an indoor environment.
*/
-static bool cfg80211_go_permissive_chan(struct cfg80211_registered_device *rdev,
+static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
+ enum nl80211_iftype iftype,
struct ieee80211_channel *chan)
{
- struct wireless_dev *wdev_iter;
- struct wiphy *wiphy = wiphy_idx_to_wiphy(rdev->wiphy_idx);
+ struct wireless_dev *wdev;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
ASSERT_RTNL();
@@ -718,32 +719,48 @@ static bool cfg80211_go_permissive_chan(struct cfg80211_registered_device *rdev,
!(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR))
return false;
+ /* only valid for GO and TDLS off-channel (station/p2p-CL) */
+ if (iftype != NL80211_IFTYPE_P2P_GO &&
+ iftype != NL80211_IFTYPE_STATION &&
+ iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return false;
+
if (regulatory_indoor_allowed() &&
(chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
return true;
- if (!(chan->flags & IEEE80211_CHAN_GO_CONCURRENT))
+ if (!(chan->flags & IEEE80211_CHAN_IR_CONCURRENT))
return false;
/*
* Generally, it is possible to rely on another device/driver to allow
- * the GO concurrent relaxation, however, since the device can further
+ * the IR concurrent relaxation, however, since the device can further
* enforce the relaxation (by doing a similar verifications as this),
* and thus fail the GO instantiation, consider only the interfaces of
* the current registered device.
*/
- list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
struct ieee80211_channel *other_chan = NULL;
int r1, r2;
- if (wdev_iter->iftype != NL80211_IFTYPE_STATION ||
- !netif_running(wdev_iter->netdev))
- continue;
-
- wdev_lock(wdev_iter);
- if (wdev_iter->current_bss)
- other_chan = wdev_iter->current_bss->pub.channel;
- wdev_unlock(wdev_iter);
+ wdev_lock(wdev);
+ if (wdev->iftype == NL80211_IFTYPE_STATION &&
+ wdev->current_bss)
+ other_chan = wdev->current_bss->pub.channel;
+
+ /*
+ * If a GO already operates on the same GO_CONCURRENT channel,
+ * this one (maybe the same one) can beacon as well. We allow
+ * the operation even if the station we relied on with
+ * GO_CONCURRENT is disconnected now. But then we must make sure
+ * we're not outdoor on an indoor-only channel.
+ */
+ if (iftype == NL80211_IFTYPE_P2P_GO &&
+ wdev->iftype == NL80211_IFTYPE_P2P_GO &&
+ wdev->beacon_interval &&
+ !(chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
+ other_chan = wdev->chandef.chan;
+ wdev_unlock(wdev);
if (!other_chan)
continue;
@@ -784,7 +801,6 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef,
enum nl80211_iftype iftype)
{
- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
bool res;
u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
IEEE80211_CHAN_RADAR;
@@ -792,13 +808,12 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype);
/*
- * Under certain conditions suggested by the some regulatory bodies
- * a GO can operate on channels marked with IEEE80211_NO_IR
- * so set this flag only if such relaxations are not enabled and
- * the conditions are not met.
+ * Under certain conditions suggested by some regulatory bodies a
+ * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
+ * only if such relaxations are not enabled and the conditions are not
+ * met.
*/
- if (iftype != NL80211_IFTYPE_P2P_GO ||
- !cfg80211_go_permissive_chan(rdev, chandef->chan))
+ if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan))
prohibited_flags |= IEEE80211_CHAN_NO_IR;
if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 801cd49c5a0c..311eef26bf88 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -222,6 +222,7 @@ struct cfg80211_event {
const u8 *ie;
size_t ie_len;
u16 reason;
+ bool locally_generated;
} dc;
struct {
u8 bssid[ETH_ALEN];
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index dd78445c7d50..c264effd00a6 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -639,8 +639,8 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
if ((chan->flags & IEEE80211_CHAN_INDOOR_ONLY) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_INDOOR_ONLY))
goto nla_put_failure;
- if ((chan->flags & IEEE80211_CHAN_GO_CONCURRENT) &&
- nla_put_flag(msg, NL80211_FREQUENCY_ATTR_GO_CONCURRENT))
+ if ((chan->flags & IEEE80211_CHAN_IR_CONCURRENT) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_IR_CONCURRENT))
goto nla_put_failure;
if ((chan->flags & IEEE80211_CHAN_NO_20MHZ) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_20MHZ))
@@ -4061,7 +4061,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
return -EINVAL;
break;
case CFG80211_STA_MESH_PEER_USER:
- if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION)
+ if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION &&
+ params->plink_action != NL80211_PLINK_ACTION_BLOCK)
return -EINVAL;
break;
}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 0e347f888fe9..d359e0610198 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -989,8 +989,8 @@ static u32 map_regdom_flags(u32 rd_flags)
channel_flags |= IEEE80211_CHAN_NO_OFDM;
if (rd_flags & NL80211_RRF_NO_OUTDOOR)
channel_flags |= IEEE80211_CHAN_INDOOR_ONLY;
- if (rd_flags & NL80211_RRF_GO_CONCURRENT)
- channel_flags |= IEEE80211_CHAN_GO_CONCURRENT;
+ if (rd_flags & NL80211_RRF_IR_CONCURRENT)
+ channel_flags |= IEEE80211_CHAN_IR_CONCURRENT;
if (rd_flags & NL80211_RRF_NO_HT40MINUS)
channel_flags |= IEEE80211_CHAN_NO_HT40MINUS;
if (rd_flags & NL80211_RRF_NO_HT40PLUS)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d11454f87bac..8020b5b094d4 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -938,7 +938,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
}
void cfg80211_disconnected(struct net_device *dev, u16 reason,
- const u8 *ie, size_t ie_len, gfp_t gfp)
+ const u8 *ie, size_t ie_len,
+ bool locally_generated, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
@@ -954,6 +955,7 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason,
ev->dc.ie_len = ie_len;
memcpy((void *)ev->dc.ie, ie, ie_len);
ev->dc.reason = reason;
+ ev->dc.locally_generated = locally_generated;
spin_lock_irqsave(&wdev->event_lock, flags);
list_add_tail(&ev->list, &wdev->event_list);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9ee6bc1a7610..9cee0220665d 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -86,7 +86,7 @@ static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static void cfg80211_leave_all(struct cfg80211_registered_device *rdev)
{
struct wireless_dev *wdev;
@@ -95,7 +95,7 @@ static void cfg80211_leave_all(struct cfg80211_registered_device *rdev)
cfg80211_leave(rdev, wdev);
}
-static int wiphy_suspend(struct device *dev, pm_message_t state)
+static int wiphy_suspend(struct device *dev)
{
struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
int ret = 0;
@@ -136,6 +136,11 @@ static int wiphy_resume(struct device *dev)
return ret;
}
+
+static SIMPLE_DEV_PM_OPS(wiphy_pm_ops, wiphy_suspend, wiphy_resume);
+#define WIPHY_PM_OPS (&wiphy_pm_ops)
+#else
+#define WIPHY_PM_OPS NULL
#endif
static const void *wiphy_namespace(struct device *d)
@@ -151,10 +156,7 @@ struct class ieee80211_class = {
.dev_release = wiphy_dev_release,
.dev_groups = ieee80211_groups,
.dev_uevent = wiphy_uevent,
-#ifdef CONFIG_PM
- .suspend = wiphy_suspend,
- .resume = wiphy_resume,
-#endif
+ .pm = WIPHY_PM_OPS,
.ns_type = &net_ns_type_operations,
.namespace = wiphy_namespace,
};
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 70051ab52f4f..baf7218cec15 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -887,7 +887,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
case EVENT_DISCONNECTED:
__cfg80211_disconnected(wdev->netdev,
ev->dc.ie, ev->dc.ie_len,
- ev->dc.reason, true);
+ ev->dc.reason,
+ !ev->dc.locally_generated);
break;
case EVENT_IBSS_JOINED:
__cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid,
@@ -944,7 +945,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
ntype == NL80211_IFTYPE_P2P_CLIENT))
return -EBUSY;
- if (ntype != otype && netif_running(dev)) {
+ if (ntype != otype) {
dev->ieee80211_ptr->use_4addr = false;
dev->ieee80211_ptr->mesh_id_up_len = 0;
wdev_lock(dev->ieee80211_ptr);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index c3ab230e4493..a750f330b8dd 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -515,10 +515,10 @@ static struct proto x25_proto = {
.obj_size = sizeof(struct x25_sock),
};
-static struct sock *x25_alloc_socket(struct net *net)
+static struct sock *x25_alloc_socket(struct net *net, int kern)
{
struct x25_sock *x25;
- struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);
+ struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, kern);
if (!sk)
goto out;
@@ -553,7 +553,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
goto out;
rc = -ENOBUFS;
- if ((sk = x25_alloc_socket(net)) == NULL)
+ if ((sk = x25_alloc_socket(net, kern)) == NULL)
goto out;
x25 = x25_sk(sk);
@@ -602,7 +602,7 @@ static struct sock *x25_make_new(struct sock *osk)
if (osk->sk_type != SOCK_SEQPACKET)
goto out;
- if ((sk = x25_alloc_socket(sock_net(osk))) == NULL)
+ if ((sk = x25_alloc_socket(sock_net(osk), 0)) == NULL)
goto out;
x25 = x25_sk(sk);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b58286ecd156..60ce7014e1b0 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -31,7 +31,7 @@ int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo)
return -EAFNOSUPPORT;
spin_lock_bh(&xfrm_input_afinfo_lock);
if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
- err = -ENOBUFS;
+ err = -EEXIST;
else
rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
spin_unlock_bh(&xfrm_input_afinfo_lock);
@@ -254,13 +254,13 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
skb->sp->xvec[skb->sp->len++] = x;
spin_lock(&x->lock);
- if (unlikely(x->km.state == XFRM_STATE_ACQ)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
- goto drop_unlock;
- }
if (unlikely(x->km.state != XFRM_STATE_VALID)) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
+ if (x->km.state == XFRM_STATE_ACQ)
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
+ else
+ XFRM_INC_STATS(net,
+ LINUX_MIB_XFRMINSTATEINVALID);
goto drop_unlock;
}
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index fbcedbe33190..68ada2ca4b60 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -38,6 +38,18 @@ static int xfrm_skb_check_space(struct sk_buff *skb)
return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
}
+/* Children define the path of the packet through the Linux networking
+ * stack. Thus, destinations are stackable.
+ */
+
+static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
+{
+ struct dst_entry *child = dst_clone(skb_dst(skb)->child);
+
+ skb_dst_drop(skb);
+ return child;
+}
+
static int xfrm_output_one(struct sk_buff *skb, int err)
{
struct dst_entry *dst = skb_dst(skb);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 638af0655aaf..18cead7645be 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -315,14 +315,6 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
}
EXPORT_SYMBOL(xfrm_policy_destroy);
-static void xfrm_queue_purge(struct sk_buff_head *list)
-{
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(list)) != NULL)
- kfree_skb(skb);
-}
-
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from lists by this moment.
*/
@@ -335,7 +327,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
if (del_timer(&policy->polq.hold_timer))
xfrm_pol_put(policy);
- xfrm_queue_purge(&policy->polq.hold_queue);
+ skb_queue_purge(&policy->polq.hold_queue);
if (del_timer(&policy->timer))
xfrm_pol_put(policy);
@@ -708,6 +700,9 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
struct xfrm_policy_queue *pq = &old->polq;
struct sk_buff_head list;
+ if (skb_queue_empty(&pq->hold_queue))
+ return;
+
__skb_queue_head_init(&list);
spin_lock_bh(&pq->hold_queue.lock);
@@ -716,9 +711,6 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
xfrm_pol_put(old);
spin_unlock_bh(&pq->hold_queue.lock);
- if (skb_queue_empty(&list))
- return;
-
pq = &new->polq;
spin_lock_bh(&pq->hold_queue.lock);
@@ -1012,7 +1004,9 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
if (list_empty(&walk->walk.all))
x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
else
- x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
+ x = list_first_entry(&walk->walk.all,
+ struct xfrm_policy_walk_entry, all);
+
list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
if (x->dead)
continue;
@@ -1120,6 +1114,9 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
}
chain = &net->xfrm.policy_inexact[dir];
hlist_for_each_entry(pol, chain, bydst) {
+ if ((pol->priority >= priority) && ret)
+ break;
+
err = xfrm_policy_match(pol, fl, type, family, dir);
if (err) {
if (err == -ESRCH)
@@ -1128,13 +1125,13 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
ret = ERR_PTR(err);
goto fail;
}
- } else if (pol->priority < priority) {
+ } else {
ret = pol;
break;
}
}
- if (ret)
- xfrm_pol_hold(ret);
+
+ xfrm_pol_hold(ret);
fail:
read_unlock_bh(&net->xfrm.xfrm_policy_lock);
@@ -1955,7 +1952,7 @@ out:
purge_queue:
pq->timeout = 0;
- xfrm_queue_purge(&pq->hold_queue);
+ skb_queue_purge(&pq->hold_queue);
xfrm_pol_put(pol);
}
@@ -2814,7 +2811,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
return -EAFNOSUPPORT;
spin_lock(&xfrm_policy_afinfo_lock);
if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
- err = -ENOBUFS;
+ err = -EEXIST;
else {
struct dst_ops *dst_ops = afinfo->dst_ops;
if (likely(dst_ops->kmem_cachep == NULL))
@@ -3209,16 +3206,17 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
}
chain = &net->xfrm.policy_inexact[dir];
hlist_for_each_entry(pol, chain, bydst) {
+ if ((pol->priority >= priority) && ret)
+ break;
+
if (xfrm_migrate_selector_match(sel, &pol->selector) &&
- pol->type == type &&
- pol->priority < priority) {
+ pol->type == type) {
ret = pol;
break;
}
}
- if (ret)
- xfrm_pol_hold(ret);
+ xfrm_pol_hold(ret);
read_unlock_bh(&net->xfrm.xfrm_policy_lock);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 96688cd0f6f1..9895a8c56d8c 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1626,7 +1626,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
if (list_empty(&walk->all))
x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
else
- x = list_entry(&walk->all, struct xfrm_state_walk, all);
+ x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
list_for_each_entry_from(x, &net->xfrm.state_all, all) {
if (x->state == XFRM_STATE_DEAD)
continue;
@@ -1908,7 +1908,7 @@ int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
return -EAFNOSUPPORT;
spin_lock_bh(&xfrm_state_afinfo_lock);
if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
- err = -ENOBUFS;
+ err = -EEXIST;
else
rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
spin_unlock_bh(&xfrm_state_afinfo_lock);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 76e3458a5419..46c6a8cf74d3 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -6,29 +6,35 @@ hostprogs-y := test_verifier test_maps
hostprogs-y += sock_example
hostprogs-y += sockex1
hostprogs-y += sockex2
+hostprogs-y += sockex3
hostprogs-y += tracex1
hostprogs-y += tracex2
hostprogs-y += tracex3
hostprogs-y += tracex4
+hostprogs-y += tracex5
test_verifier-objs := test_verifier.o libbpf.o
test_maps-objs := test_maps.o libbpf.o
sock_example-objs := sock_example.o libbpf.o
sockex1-objs := bpf_load.o libbpf.o sockex1_user.o
sockex2-objs := bpf_load.o libbpf.o sockex2_user.o
+sockex3-objs := bpf_load.o libbpf.o sockex3_user.o
tracex1-objs := bpf_load.o libbpf.o tracex1_user.o
tracex2-objs := bpf_load.o libbpf.o tracex2_user.o
tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
+tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
# Tell kbuild to always build the programs
always := $(hostprogs-y)
always += sockex1_kern.o
always += sockex2_kern.o
+always += sockex3_kern.o
always += tracex1_kern.o
always += tracex2_kern.o
always += tracex3_kern.o
always += tracex4_kern.o
+always += tracex5_kern.o
always += tcbpf1_kern.o
HOSTCFLAGS += -I$(objtree)/usr/include
@@ -36,15 +42,17 @@ HOSTCFLAGS += -I$(objtree)/usr/include
HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
HOSTLOADLIBES_sockex1 += -lelf
HOSTLOADLIBES_sockex2 += -lelf
+HOSTLOADLIBES_sockex3 += -lelf
HOSTLOADLIBES_tracex1 += -lelf
HOSTLOADLIBES_tracex2 += -lelf
HOSTLOADLIBES_tracex3 += -lelf
HOSTLOADLIBES_tracex4 += -lelf -lrt
+HOSTLOADLIBES_tracex5 += -lelf
# point this to your LLVM backend with bpf support
LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
-%.o: %.c
+$(obj)/%.o: $(src)/%.c
clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index f960b5fb3ed8..f531a0b3282d 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -21,6 +21,10 @@ static unsigned long long (*bpf_ktime_get_ns)(void) =
(void *) BPF_FUNC_ktime_get_ns;
static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
(void *) BPF_FUNC_trace_printk;
+static void (*bpf_tail_call)(void *ctx, void *map, int index) =
+ (void *) BPF_FUNC_tail_call;
+static unsigned long long (*bpf_get_smp_processor_id)(void) =
+ (void *) BPF_FUNC_get_smp_processor_id;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 38dac5a53b51..da86a8e0a95a 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -16,6 +16,7 @@
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <poll.h>
+#include <ctype.h>
#include "libbpf.h"
#include "bpf_helpers.h"
#include "bpf_load.h"
@@ -29,6 +30,19 @@ int map_fd[MAX_MAPS];
int prog_fd[MAX_PROGS];
int event_fd[MAX_PROGS];
int prog_cnt;
+int prog_array_fd = -1;
+
+static int populate_prog_array(const char *event, int prog_fd)
+{
+ int ind = atoi(event), err;
+
+ err = bpf_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
+ if (err < 0) {
+ printf("failed to store prog_fd in prog_array\n");
+ return -1;
+ }
+ return 0;
+}
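+
+/* Naming convention handled below (exercised by sockex3 and tracex5):
+ * a SEC name like "socket/3" or "kprobe/7" means "store this program's
+ * fd at that index in the prog array" (for later bpf_tail_call) rather
+ * than attaching the program directly.
+ */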
static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
{
@@ -54,12 +68,40 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
return -1;
}
+ fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
+ if (fd < 0) {
+ printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
+ return -1;
+ }
+
+ prog_fd[prog_cnt++] = fd;
+
+ if (is_socket) {
+ event += 6;
+ if (*event != '/')
+ return 0;
+ event++;
+ if (!isdigit(*event)) {
+ printf("invalid prog number\n");
+ return -1;
+ }
+ return populate_prog_array(event, fd);
+ }
+
if (is_kprobe || is_kretprobe) {
if (is_kprobe)
event += 7;
else
event += 10;
+ if (*event == 0) {
+ printf("event name cannot be empty\n");
+ return -1;
+ }
+
+ if (isdigit(*event))
+ return populate_prog_array(event, fd);
+
snprintf(buf, sizeof(buf),
"echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
is_kprobe ? 'p' : 'r', event, event);
@@ -71,18 +113,6 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
}
}
- fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
-
- if (fd < 0) {
- printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
- return -1;
- }
-
- prog_fd[prog_cnt++] = fd;
-
- if (is_socket)
- return 0;
-
strcpy(buf, DEBUGFS);
strcat(buf, "events/kprobes/");
strcat(buf, event);
@@ -130,6 +160,9 @@ static int load_maps(struct bpf_map_def *maps, int len)
maps[i].max_entries);
if (map_fd[i] < 0)
return 1;
+
+ if (maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
+ prog_array_fd = map_fd[i];
}
return 0;
}
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
new file mode 100644
index 000000000000..41ae2fd21b13
--- /dev/null
+++ b/samples/bpf/sockex3_kern.c
@@ -0,0 +1,290 @@
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+#include <uapi/linux/in.h>
+#include <uapi/linux/if.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/if_tunnel.h>
+#include <uapi/linux/mpls.h>
+#define IP_MF 0x2000
+#define IP_OFFSET 0x1FFF
+
+#define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F
+
+struct bpf_map_def SEC("maps") jmp_table = {
+ .type = BPF_MAP_TYPE_PROG_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u32),
+ .max_entries = 8,
+};
+
+#define PARSE_VLAN 1
+#define PARSE_MPLS 2
+#define PARSE_IP 3
+#define PARSE_IPV6 4
+
+/* Protocol dispatch routine.
+ * It tail-calls the next BPF program depending on the eth proto.
+ * Note: we could have used
+ * bpf_tail_call(skb, &jmp_table, proto);
+ * but it would need a large prog_array
+ */
+static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto)
+{
+ switch (proto) {
+ case ETH_P_8021Q:
+ case ETH_P_8021AD:
+ bpf_tail_call(skb, &jmp_table, PARSE_VLAN);
+ break;
+ case ETH_P_MPLS_UC:
+ case ETH_P_MPLS_MC:
+ bpf_tail_call(skb, &jmp_table, PARSE_MPLS);
+ break;
+ case ETH_P_IP:
+ bpf_tail_call(skb, &jmp_table, PARSE_IP);
+ break;
+ case ETH_P_IPV6:
+ bpf_tail_call(skb, &jmp_table, PARSE_IPV6);
+ break;
+ }
+}
+
+struct vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+
+struct flow_keys {
+ __be32 src;
+ __be32 dst;
+ union {
+ __be32 ports;
+ __be16 port16[2];
+ };
+ __u32 ip_proto;
+};
+
+static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
+{
+ return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
+ & (IP_MF | IP_OFFSET);
+}
+
+static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
+{
+ __u64 w0 = load_word(ctx, off);
+ __u64 w1 = load_word(ctx, off + 4);
+ __u64 w2 = load_word(ctx, off + 8);
+ __u64 w3 = load_word(ctx, off + 12);
+
+ return (__u32)(w0 ^ w1 ^ w2 ^ w3);
+}
+
+struct globals {
+ struct flow_keys flow;
+};
+
+struct bpf_map_def SEC("maps") percpu_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct globals),
+ .max_entries = 32,
+};
+
+/* use a poor man's per_cpu until native support is ready */
+static struct globals *this_cpu_globals(void)
+{
+ u32 key = bpf_get_smp_processor_id();
+
+ return bpf_map_lookup_elem(&percpu_map, &key);
+}
+
+/* some simple stats for user space consumption */
+struct pair {
+ __u64 packets;
+ __u64 bytes;
+};
+
+struct bpf_map_def SEC("maps") hash_map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct flow_keys),
+ .value_size = sizeof(struct pair),
+ .max_entries = 1024,
+};
+
+static void update_stats(struct __sk_buff *skb, struct globals *g)
+{
+ struct flow_keys key = g->flow;
+ struct pair *value;
+
+ value = bpf_map_lookup_elem(&hash_map, &key);
+ if (value) {
+ __sync_fetch_and_add(&value->packets, 1);
+ __sync_fetch_and_add(&value->bytes, skb->len);
+ } else {
+ struct pair val = {1, skb->len};
+
+ bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
+ }
+}
+
+static __always_inline void parse_ip_proto(struct __sk_buff *skb,
+ struct globals *g, __u32 ip_proto)
+{
+ __u32 nhoff = skb->cb[0];
+ int poff;
+
+ switch (ip_proto) {
+ case IPPROTO_GRE: {
+ struct gre_hdr {
+ __be16 flags;
+ __be16 proto;
+ };
+
+ __u32 gre_flags = load_half(skb,
+ nhoff + offsetof(struct gre_hdr, flags));
+ __u32 gre_proto = load_half(skb,
+ nhoff + offsetof(struct gre_hdr, proto));
+
+ if (gre_flags & (GRE_VERSION|GRE_ROUTING))
+ break;
+
+ nhoff += 4;
+ if (gre_flags & GRE_CSUM)
+ nhoff += 4;
+ if (gre_flags & GRE_KEY)
+ nhoff += 4;
+ if (gre_flags & GRE_SEQ)
+ nhoff += 4;
+
+ skb->cb[0] = nhoff;
+ parse_eth_proto(skb, gre_proto);
+ break;
+ }
+ case IPPROTO_IPIP:
+ parse_eth_proto(skb, ETH_P_IP);
+ break;
+ case IPPROTO_IPV6:
+ parse_eth_proto(skb, ETH_P_IPV6);
+ break;
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ g->flow.ports = load_word(skb, nhoff);
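+ /* fall through to ICMP case: record ip_proto and update stats */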
+ case IPPROTO_ICMP:
+ g->flow.ip_proto = ip_proto;
+ update_stats(skb, g);
+ break;
+ default:
+ break;
+ }
+}
+
+PROG(PARSE_IP)(struct __sk_buff *skb)
+{
+ struct globals *g = this_cpu_globals();
+ __u32 nhoff, verlen, ip_proto;
+
+ if (!g)
+ return 0;
+
+ nhoff = skb->cb[0];
+
+ if (unlikely(ip_is_fragment(skb, nhoff)))
+ return 0;
+
+ ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));
+
+ if (ip_proto != IPPROTO_GRE) {
+ g->flow.src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
+ g->flow.dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
+ }
+
+ verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
+ nhoff += (verlen & 0xF) << 2;
+
+ skb->cb[0] = nhoff;
+ parse_ip_proto(skb, g, ip_proto);
+ return 0;
+}
+
+PROG(PARSE_IPV6)(struct __sk_buff *skb)
+{
+ struct globals *g = this_cpu_globals();
+ __u32 nhoff, ip_proto;
+
+ if (!g)
+ return 0;
+
+ nhoff = skb->cb[0];
+
+ ip_proto = load_byte(skb,
+ nhoff + offsetof(struct ipv6hdr, nexthdr));
+ g->flow.src = ipv6_addr_hash(skb,
+ nhoff + offsetof(struct ipv6hdr, saddr));
+ g->flow.dst = ipv6_addr_hash(skb,
+ nhoff + offsetof(struct ipv6hdr, daddr));
+ nhoff += sizeof(struct ipv6hdr);
+
+ skb->cb[0] = nhoff;
+ parse_ip_proto(skb, g, ip_proto);
+ return 0;
+}
+
+PROG(PARSE_VLAN)(struct __sk_buff *skb)
+{
+ __u32 nhoff, proto;
+
+ nhoff = skb->cb[0];
+
+ proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
+ h_vlan_encapsulated_proto));
+ nhoff += sizeof(struct vlan_hdr);
+ skb->cb[0] = nhoff;
+
+ parse_eth_proto(skb, proto);
+
+ return 0;
+}
+
+PROG(PARSE_MPLS)(struct __sk_buff *skb)
+{
+ __u32 nhoff, label;
+
+ nhoff = skb->cb[0];
+
+ label = load_word(skb, nhoff);
+ nhoff += sizeof(struct mpls_label);
+ skb->cb[0] = nhoff;
+
+ if (label & MPLS_LS_S_MASK) {
+ __u8 verlen = load_byte(skb, nhoff);
+ if ((verlen & 0xF0) == 4)
+ parse_eth_proto(skb, ETH_P_IP);
+ else
+ parse_eth_proto(skb, ETH_P_IPV6);
+ } else {
+ parse_eth_proto(skb, ETH_P_MPLS_UC);
+ }
+
+ return 0;
+}
+
+SEC("socket/0")
+int main_prog(struct __sk_buff *skb)
+{
+ __u32 nhoff = ETH_HLEN;
+ __u32 proto = load_half(skb, 12);
+
+ skb->cb[0] = nhoff;
+ parse_eth_proto(skb, proto);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
new file mode 100644
index 000000000000..2617772d060d
--- /dev/null
+++ b/samples/bpf/sockex3_user.c
@@ -0,0 +1,66 @@
+#include <stdio.h>
+#include <assert.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+#include <unistd.h>
+#include <arpa/inet.h>
+
+struct flow_keys {
+ __be32 src;
+ __be32 dst;
+ union {
+ __be32 ports;
+ __be16 port16[2];
+ };
+ __u32 ip_proto;
+};
+
+struct pair {
+ __u64 packets;
+ __u64 bytes;
+};
+
+int main(int argc, char **argv)
+{
+ char filename[256];
+ FILE *f;
+ int i, sock;
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ sock = open_raw_sock("lo");
+
+ assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd[4],
+ sizeof(__u32)) == 0);
+
+ if (argc > 1)
+ f = popen("ping -c5 localhost", "r");
+ else
+ f = popen("netperf -l 4 localhost", "r");
+ (void) f;
+
+ for (i = 0; i < 5; i++) {
+ struct flow_keys key = {}, next_key;
+ struct pair value;
+
+ sleep(1);
+ printf("IP src.port -> dst.port bytes packets\n");
+ while (bpf_get_next_key(map_fd[2], &key, &next_key) == 0) {
+ bpf_lookup_elem(map_fd[2], &next_key, &value);
+ printf("%s.%05d -> %s.%05d %12lld %12lld\n",
+ inet_ntoa((struct in_addr){htonl(next_key.src)}),
+ next_key.port16[0],
+ inet_ntoa((struct in_addr){htonl(next_key.dst)}),
+ next_key.port16[1],
+ value.bytes, value.packets);
+ key = next_key;
+ }
+ }
+ return 0;
+}
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
index 7c27710f8296..9bfb2eb34563 100644
--- a/samples/bpf/tcbpf1_kern.c
+++ b/samples/bpf/tcbpf1_kern.c
@@ -21,7 +21,7 @@ static inline void set_dst_mac(struct __sk_buff *skb, char *mac)
static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
{
- __u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF);
+ __u8 old_tos = load_byte(skb, TOS_OFF);
bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
@@ -34,7 +34,7 @@ static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
{
- __u32 old_ip = _htonl(load_word(skb, BPF_LL_OFF + IP_SRC_OFF));
+ __u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF));
bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
@@ -44,7 +44,7 @@ static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
#define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest))
static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
{
- __u16 old_port = htons(load_half(skb, BPF_LL_OFF + TCP_DPORT_OFF));
+ __u16 old_port = htons(load_half(skb, TCP_DPORT_OFF));
bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port));
bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0);
@@ -53,7 +53,7 @@ static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
SEC("classifier")
int bpf_prog1(struct __sk_buff *skb)
{
- __u8 proto = load_byte(skb, BPF_LL_OFF + ETH_HLEN + offsetof(struct iphdr, protocol));
+ __u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
long *value;
if (proto == IPPROTO_TCP) {
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index 12f3780af73f..693605997abc 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -29,6 +29,7 @@ struct bpf_test {
ACCEPT,
REJECT
} result;
+ enum bpf_prog_type prog_type;
};
static struct bpf_test tests[] = {
@@ -743,6 +744,84 @@ static struct bpf_test tests[] = {
.errstr = "different pointers",
.result = REJECT,
},
+ {
+ "check skb->mark is not writeable by sockets",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check skb->tc_index is not writeable by sockets",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check non-u32 access to cb",
+ .insns = {
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check out of range skb->cb access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[60])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+ },
+ {
+ "write skb fields from socket prog",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "write skb fields from tc_cls_act prog",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
};
static int probe_filter_length(struct bpf_insn *fp)
@@ -775,6 +854,7 @@ static int test(void)
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct bpf_insn *prog = tests[i].insns;
+ int prog_type = tests[i].prog_type;
int prog_len = probe_filter_length(prog);
int *fixup = tests[i].fixup;
int map_fd = -1;
@@ -789,8 +869,8 @@ static int test(void)
}
printf("#%d %s ", i, tests[i].descr);
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog,
- prog_len * sizeof(struct bpf_insn),
+ prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
+ prog, prog_len * sizeof(struct bpf_insn),
"GPL", 0);
if (tests[i].result == ACCEPT) {
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
new file mode 100644
index 000000000000..b71fe07a7a7a
--- /dev/null
+++ b/samples/bpf/tracex5_kern.c
@@ -0,0 +1,75 @@
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/seccomp.h>
+#include "bpf_helpers.h"
+
+#define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
+
+struct bpf_map_def SEC("maps") progs = {
+ .type = BPF_MAP_TYPE_PROG_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u32),
+ .max_entries = 1024,
+};
+
+SEC("kprobe/seccomp_phase1")
+int bpf_prog1(struct pt_regs *ctx)
+{
+ struct seccomp_data sd = {};
+
+ bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+
+ /* dispatch into next BPF program depending on syscall number */
+ bpf_tail_call(ctx, &progs, sd.nr);
+
+ /* fall through -> unknown syscall */
+ if (sd.nr >= __NR_getuid && sd.nr <= __NR_getsid) {
+ char fmt[] = "syscall=%d (one of get/set uid/pid/gid)\n";
+ bpf_trace_printk(fmt, sizeof(fmt), sd.nr);
+ }
+ return 0;
+}
+
+/* we jump here when syscall number == __NR_write */
+PROG(__NR_write)(struct pt_regs *ctx)
+{
+ struct seccomp_data sd = {};
+
+ bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+ if (sd.args[2] == 512) {
+ char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
+ bpf_trace_printk(fmt, sizeof(fmt),
+ sd.args[0], sd.args[1], sd.args[2]);
+ }
+ return 0;
+}
+
+PROG(__NR_read)(struct pt_regs *ctx)
+{
+ struct seccomp_data sd = {};
+
+ bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+ if (sd.args[2] > 128 && sd.args[2] <= 1024) {
+ char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
+ bpf_trace_printk(fmt, sizeof(fmt),
+ sd.args[0], sd.args[1], sd.args[2]);
+ }
+ return 0;
+}
+
+PROG(__NR_mmap)(struct pt_regs *ctx)
+{
+ char fmt[] = "mmap\n";
+ bpf_trace_printk(fmt, sizeof(fmt));
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
new file mode 100644
index 000000000000..a04dd3cd4358
--- /dev/null
+++ b/samples/bpf/tracex5_user.c
@@ -0,0 +1,46 @@
+#include <stdio.h>
+#include <linux/bpf.h>
+#include <unistd.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+#include <sys/prctl.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+/* Install a fake seccomp program to enable the seccomp code path inside
+ * the kernel, so that our kprobe attached to seccomp_phase1() can be
+ * triggered.
+ */
+static void install_accept_all_seccomp(void)
+{
+ struct sock_filter filter[] = {
+ BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
+ };
+ struct sock_fprog prog = {
+ .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
+ .filter = filter,
+ };
+ if (prctl(PR_SET_SECCOMP, 2, &prog))
+ perror("prctl");
+}
+
+int main(int ac, char **argv)
+{
+ FILE *f;
+ char filename[256];
+
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+ if (load_bpf_file(filename)) {
+ printf("%s", bpf_log_buf);
+ return 1;
+ }
+
+ install_accept_all_seccomp();
+
+ f = popen("dd if=/dev/zero of=/dev/null count=5", "r");
+ (void) f;
+
+ read_trace_pipe();
+
+ return 0;
+}
diff --git a/samples/pktgen/README.rst b/samples/pktgen/README.rst
new file mode 100644
index 000000000000..8365c4e5c513
--- /dev/null
+++ b/samples/pktgen/README.rst
@@ -0,0 +1,43 @@
+Sample and benchmark scripts for pktgen (packet generator)
+==========================================================
+This directory contains some pktgen sample and benchmark scripts that
+can easily be copied and adjusted for your own use case.
+
+General documentation is located in the kernel tree: Documentation/networking/pktgen.txt
+
+Helper include files
+====================
+This directory contains two helper shell files that can be "included"
+by shell sourcing, namely "functions.sh" and "parameters.sh".
+
+Common parameters
+-----------------
+The parameters.sh file supports easy and consistent parameter parsing
+across the sample scripts. A usage example is printed on errors::
+
+ Usage: ./pktgen_sample01_simple.sh [-vx] -i ethX
+ -i : ($DEV) output interface/device (required)
+ -s : ($PKT_SIZE) packet size
+ -d : ($DEST_IP) destination IP
+ -m : ($DST_MAC) destination MAC-addr
+ -t : ($THREADS) threads to start
+ -c : ($SKB_CLONE) SKB clones send before alloc new SKB
+ -b : ($BURST) HW level bursting of SKBs
+ -v : ($VERBOSE) verbose
+ -x : ($DEBUG) debug
+
+The global variable being set is also listed, e.g. the required
+interface/device parameter "-i" sets the variable $DEV.
+
+Common functions
+----------------
+The functions.sh file provides three different shell functions for
+configuring the different components of pktgen: pg_ctrl(), pg_thread()
+and pg_set().
+
+These functions correspond to pktgen's different components.
+ * pg_ctrl() control "pgctrl" (/proc/net/pktgen/pgctrl)
+ * pg_thread() control the kernel threads and binding to devices
+ * pg_set() control setup of individual devices
+
+See sample scripts for usage examples.
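+
+A minimal usage sketch (the device name eth0 and thread 0 are just
+assumptions for illustration; any interface name works)::
+
+ source ./functions.sh
+ pg_thread 0 "rem_device_all" # detach all devices from thread 0
+ pg_thread 0 "add_device" eth0 # bind eth0 to thread 0
+ pg_set eth0 "pkt_size 60" # per-device setup
+ pg_ctrl "start" # start the run via /proc/net/pktgen/pgctrl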
diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh
new file mode 100644
index 000000000000..205e4cde4601
--- /dev/null
+++ b/samples/pktgen/functions.sh
@@ -0,0 +1,121 @@
+#
+# Common functions used by pktgen scripts
+# - Depending on bash 3 (or higher) syntax
+#
+# Author: Jesper Dangaard Brouer
+# License: GPL
+
+## -- General shell logging cmds --
+function err() {
+ local exitcode=$1
+ shift
+ echo "ERROR: $@" >&2
+ exit $exitcode
+}
+
+function warn() {
+ echo "WARN : $@" >&2
+}
+
+function info() {
+ if [[ -n "$VERBOSE" ]]; then
+ echo "INFO : $@" >&2
+ fi
+}
+
+## -- Pktgen proc config commands -- ##
+export PROC_DIR=/proc/net/pktgen
+#
+# Three different shell functions for configuring the different
+# components of pktgen:
+# pg_ctrl(), pg_thread() and pg_set().
+#
+# These functions correspond to pktgen's different components.
+# * pg_ctrl() control "pgctrl" (/proc/net/pktgen/pgctrl)
+# * pg_thread() control the kernel threads and binding to devices
+# * pg_set() control setup of individual devices
+function pg_ctrl() {
+ local proc_file="pgctrl"
+ proc_cmd ${proc_file} "$@"
+}
+
+function pg_thread() {
+ local thread=$1
+ local proc_file="kpktgend_${thread}"
+ shift
+ proc_cmd ${proc_file} "$@"
+}
+
+function pg_set() {
+ local dev=$1
+ local proc_file="$dev"
+ shift
+ proc_cmd ${proc_file} "$@"
+}
+
+# More generic replacement for pgset() that does not depend on a global
+# variable for the proc file.
+function proc_cmd() {
+ local result
+ local proc_file=$1
+ # after shift, the remaining args are contained in $@
+ shift
+ local proc_ctrl=${PROC_DIR}/$proc_file
+ if [[ ! -e "$proc_ctrl" ]]; then
+ err 3 "proc file:$proc_ctrl does not exists (dev added to thread?)"
+ else
+ if [[ ! -w "$proc_ctrl" ]]; then
+ err 4 "proc file:$proc_ctrl not writable, not root?!"
+ fi
+ fi
+
+ if [[ "$DEBUG" == "yes" ]]; then
+ echo "cmd: $@ > $proc_ctrl"
+ fi
+ # Quoting of "$@" is important for space expansion
+ echo "$@" > "$proc_ctrl"
+ local status=$?
+
+ result=$(grep "Result: OK:" $proc_ctrl)
+ # Due to pgctrl, cannot use exit code $? from grep
+ if [[ "$result" == "" ]]; then
+ grep "Result:" $proc_ctrl >&2
+ fi
+ if (( $status != 0 )); then
+ err 5 "Write error($status) occurred cmd: \"$@ > $proc_ctrl\""
+ fi
+}
+
+# Old obsolete "pgset" function, with slightly improved err handling
+function pgset() {
+ local result
+
+ if [[ "$DEBUG" == "yes" ]]; then
+ echo "cmd: $1 > $PGDEV"
+ fi
+ echo $1 > $PGDEV
+ local status=$?
+
+ result=`cat $PGDEV | fgrep "Result: OK:"`
+ if [[ "$result" == "" ]]; then
+ cat $PGDEV | fgrep Result:
+ fi
+ if (( $status != 0 )); then
+ err 5 "Write error($status) occurred cmd: \"$1 > $PGDEV\""
+ fi
+}
+
+## -- General shell tricks --
+
+function root_check_run_with_sudo() {
+ # Trick so the program can be run as a normal user; it will simply
+ # re-run itself with "sudo". Call as: root_check_run_with_sudo "$@"
+ if [ "$EUID" -ne 0 ]; then
+ if [ -x $0 ]; then # Directly executable, use sudo
+ info "Not root, running with sudo"
+ sudo "$0" "$@"
+ exit $?
+ fi
+ err 4 "cannot perform sudo run of $0"
+ fi
+}
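+
+# Usage sketch (mirrors the pattern used by the sample scripts in this
+# directory; paths are resolved relative to the calling script):
+#
+#   basedir=`dirname $0`
+#   source ${basedir}/functions.sh
+#   root_check_run_with_sudo "$@"
+#   source ${basedir}/parameters.sh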
diff --git a/samples/pktgen/parameters.sh b/samples/pktgen/parameters.sh
new file mode 100644
index 000000000000..33b70fdd5a4a
--- /dev/null
+++ b/samples/pktgen/parameters.sh
@@ -0,0 +1,97 @@
+#
+# Common parameter parsing for pktgen scripts
+#
+
+function usage() {
+ echo ""
+ echo "Usage: $0 [-vx] -i ethX"
+ echo " -i : (\$DEV) output interface/device (required)"
+ echo " -s : (\$PKT_SIZE) packet size"
+ echo " -d : (\$DEST_IP) destination IP"
+ echo " -m : (\$DST_MAC) destination MAC-addr"
+ echo " -t : (\$THREADS) threads to start"
+ echo " -c : (\$SKB_CLONE) SKB clones send before alloc new SKB"
+ echo " -b : (\$BURST) HW level bursting of SKBs"
+ echo " -v : (\$VERBOSE) verbose"
+ echo " -x : (\$DEBUG) debug"
+ echo ""
+}
+
+## --- Parse command line arguments / parameters ---
+## echo "Commandline options:"
+while getopts "s:i:d:m:t:c:b:vxh" option; do
+ case $option in
+ i) # interface
+ export DEV=$OPTARG
+ info "Output device set to: DEV=$DEV"
+ ;;
+ s)
+ export PKT_SIZE=$OPTARG
+ info "Packet size set to: PKT_SIZE=$PKT_SIZE bytes"
+ ;;
+ d) # destination IP
+ export DEST_IP=$OPTARG
+ info "Destination IP set to: DEST_IP=$DEST_IP"
+ ;;
+ m) # MAC
+ export DST_MAC=$OPTARG
+ info "Destination MAC set to: DST_MAC=$DST_MAC"
+ ;;
+ t)
+ export THREADS=$OPTARG
+ export CPU_THREADS=$OPTARG
+ let "CPU_THREADS -= 1"
+ info "Number of threads to start: $THREADS (0 to $CPU_THREADS)"
+ ;;
+ c)
+ export CLONE_SKB=$OPTARG
+ info "CLONE_SKB=$CLONE_SKB"
+ ;;
+ b)
+ export BURST=$OPTARG
+ info "SKB bursting: BURST=$BURST"
+ ;;
+ v)
+ export VERBOSE=yes
+ info "Verbose mode: VERBOSE=$VERBOSE"
+ ;;
+ x)
+ export DEBUG=yes
+ info "Debug mode: DEBUG=$DEBUG"
+ ;;
+ h|?|*)
+ usage;
+ err 2 "[ERROR] Unknown parameters!!!"
+ esac
+done
+shift $(( $OPTIND - 1 ))
+
+if [ -z "$PKT_SIZE" ]; then
+ # NIC adds 4 bytes CRC
+ export PKT_SIZE=60
+ info "Default packet size set to: set to: $PKT_SIZE bytes"
+fi
+
+if [ -z "$THREADS" ]; then
+ # Zero CPU threads means one thread, because CPU numbers are zero indexed
+ export CPU_THREADS=0
+ export THREADS=1
+fi
+
+if [ -z "$DEV" ]; then
+ usage
+ err 2 "Please specify output device"
+fi
+
+if [ -z "$DST_MAC" ]; then
+ warn "Missing destination MAC address"
+fi
+
+if [ -z "$DEST_IP" ]; then
+ warn "Missing destination IP address"
+fi
+
+if [ ! -d /proc/net/pktgen ]; then
+ info "Loading kernel module: pktgen"
+ modprobe pktgen
+fi
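+
+# Example invocations (eth0 is a hypothetical interface name; pick your own):
+#
+#   ./pktgen_sample01_simple.sh -i eth0 -m 90:e2:ba:ff:ff:ff
+#   ./pktgen_sample02_multiqueue.sh -i eth0 -t 4 -v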
diff --git a/samples/pktgen/pktgen.conf-1-1 b/samples/pktgen/pktgen.conf-1-1
deleted file mode 100755
index f91daad9e916..000000000000
--- a/samples/pktgen/pktgen.conf-1-1
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/bash
-
-#modprobe pktgen
-
-
-function pgset() {
- local result
-
- echo $1 > $PGDEV
-
- result=`cat $PGDEV | fgrep "Result: OK:"`
- if [ "$result" = "" ]; then
- cat $PGDEV | fgrep Result:
- fi
-}
-
-# Config Start Here -----------------------------------------------------------
-
-
-# thread config
-# Each CPU has its own thread. One CPU example. We add eth1.
-
-PGDEV=/proc/net/pktgen/kpktgend_0
- echo "Removing all devices"
- pgset "rem_device_all"
- echo "Adding eth1"
- pgset "add_device eth1"
-
-
-# device config
-# delay 0 means maximum speed.
-
-CLONE_SKB="clone_skb 1000000"
-# NIC adds 4 bytes CRC
-PKT_SIZE="pkt_size 60"
-
-# COUNT 0 means forever
-#COUNT="count 0"
-COUNT="count 10000000"
-DELAY="delay 0"
-
-PGDEV=/proc/net/pktgen/eth1
- echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 10.10.11.2"
- pgset "dst_mac 00:04:23:08:91:dc"
-
-
-# Time to run
-PGDEV=/proc/net/pktgen/pgctrl
-
- echo "Running... ctrl^C to stop"
- trap true INT
- pgset "start"
- echo "Done"
- cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-2-1 b/samples/pktgen/pktgen.conf-2-1
deleted file mode 100755
index e108e97d6d89..000000000000
--- a/samples/pktgen/pktgen.conf-2-1
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-
-#modprobe pktgen
-
-
-function pgset() {
- local result
-
- echo $1 > $PGDEV
-
- result=`cat $PGDEV | fgrep "Result: OK:"`
- if [ "$result" = "" ]; then
- cat $PGDEV | fgrep Result:
- fi
-}
-
-# Config Start Here -----------------------------------------------------------
-
-
-# thread config
-# Each CPU has its own thread. Two CPU example. We add eth1 to the first
-# and leave the second idle.
-
-PGDEV=/proc/net/pktgen/kpktgend_0
- echo "Removing all devices"
- pgset "rem_device_all"
- echo "Adding eth1"
- pgset "add_device eth1"
-
-# We need to remove old config since we dont use this thread. We can only
-# one NIC on one CPU due to affinity reasons.
-
-PGDEV=/proc/net/pktgen/kpktgend_1
- echo "Removing all devices"
- pgset "rem_device_all"
-
-# device config
-# delay 0 means maximum speed.
-
-CLONE_SKB="clone_skb 1000000"
-# NIC adds 4 bytes CRC
-PKT_SIZE="pkt_size 60"
-
-# COUNT 0 means forever
-#COUNT="count 0"
-COUNT="count 10000000"
-DELAY="delay 0"
-
-PGDEV=/proc/net/pktgen/eth1
- echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 10.10.11.2"
- pgset "dst_mac 00:04:23:08:91:dc"
-
-
-# Time to run
-PGDEV=/proc/net/pktgen/pgctrl
-
- echo "Running... ctrl^C to stop"
- trap true INT
- pgset "start"
- echo "Done"
- cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-2-2 b/samples/pktgen/pktgen.conf-2-2
deleted file mode 100755
index acea15503e71..000000000000
--- a/samples/pktgen/pktgen.conf-2-2
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-#modprobe pktgen
-
-
-function pgset() {
- local result
-
- echo $1 > $PGDEV
-
- result=`cat $PGDEV | fgrep "Result: OK:"`
- if [ "$result" = "" ]; then
- cat $PGDEV | fgrep Result:
- fi
-}
-
-# Config Start Here -----------------------------------------------------------
-
-
-# thread config
-# Each CPU has its own thread. Two CPU example. We add eth1, eth2 respectively.
-
-PGDEV=/proc/net/pktgen/kpktgend_0
- echo "Removing all devices"
- pgset "rem_device_all"
- echo "Adding eth1"
- pgset "add_device eth1"
-
-PGDEV=/proc/net/pktgen/kpktgend_1
- echo "Removing all devices"
- pgset "rem_device_all"
- echo "Adding eth2"
- pgset "add_device eth2"
-
-
-# device config
-# delay 0 means maximum speed.
-
-CLONE_SKB="clone_skb 1000000"
-# NIC adds 4 bytes CRC
-PKT_SIZE="pkt_size 60"
-
-# COUNT 0 means forever
-#COUNT="count 0"
-COUNT="count 10000000"
-DELAY="delay 0"
-
-PGDEV=/proc/net/pktgen/eth1
- echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 10.10.11.2"
- pgset "dst_mac 00:04:23:08:91:dc"
-
-PGDEV=/proc/net/pktgen/eth2
- echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 192.168.2.2"
- pgset "dst_mac 00:04:23:08:91:de"
-
-# Time to run
-PGDEV=/proc/net/pktgen/pgctrl
-
- echo "Running... ctrl^C to stop"
- trap true INT
- pgset "start"
- echo "Done"
- cat /proc/net/pktgen/eth1 /proc/net/pktgen/eth2
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
new file mode 100755
index 000000000000..cb1590331b47
--- /dev/null
+++ b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+#
+# Benchmark script:
+# - developed for benchmarking ingress qdisc path
+#
+# Script for injecting packets into the RX path of the stack with pktgen
+# "xmit_mode netif_receive". With an invalid dst_mac this will only
+# measure the ingress code path, as packets get dropped in ip_rcv().
+#
+# This script doesn't really need any hardware. It benchmarks the
+# software RX path just after the NIC driver level. With bursting it
+# also "removes" the SKB alloc/free overhead.
+#
+# Setup scenarios for measuring ingress qdisc (with invalid dst_mac):
+# ------------------------------------------------------------------
+# (1) no ingress (uses static_key_false(&ingress_needed))
+#
+# (2) ingress on other dev (change ingress_needed and calls
+# handle_ing() but exit early)
+#
+# config: tc qdisc add dev $SOMEDEV handle ffff: ingress
+#
+# (3) ingress on this dev, handle_ing() -> tc_classify()
+#
+# config: tc qdisc add dev $DEV handle ffff: ingress
+#
+# (4) ingress on this dev + drop at u32 classifier/action.
+#
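+# config (a sketch; the exact u32 match/flowid is up to you):
+# tc qdisc add dev $DEV handle ffff: ingress
+# tc filter add dev $DEV parent ffff: protocol ip prio 1 \
+# u32 match u32 0 0 flowid 1:1 action drop
+#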
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+source ${basedir}/parameters.sh
+# Using invalid DST_MAC will cause the packets to get dropped in
+# ip_rcv() which is part of the test
+[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+[ -z "$BURST" ] && BURST=1024
+
+# Base Config
+DELAY="0" # Zero means max speed
+COUNT="10000000" # Zero means indefinitely
+
+# General cleanup everything since last run
+pg_ctrl "reset"
+
+# Threads are specified with parameter -t value in $THREADS
+for ((thread = 0; thread < $THREADS; thread++)); do
+ # The device name is extended with @name, using the thread number to
+ # make them unique, but any name will do.
+ dev=${DEV}@${thread}
+
+ # Remove all other devices and add $dev to this thread
+ pg_thread $thread "rem_device_all"
+ pg_thread $thread "add_device" $dev
+
+ # Base config of dev
+ pg_set $dev "flag QUEUE_MAP_CPU"
+ pg_set $dev "count $COUNT"
+ pg_set $dev "pkt_size $PKT_SIZE"
+ pg_set $dev "delay $DELAY"
+ pg_set $dev "flag NO_TIMESTAMP"
+
+ # Destination
+ pg_set $dev "dst_mac $DST_MAC"
+ pg_set $dev "dst $DEST_IP"
+
+ # Inject packet into RX path of stack
+ pg_set $dev "xmit_mode netif_receive"
+
+ # Bursting allows us to avoid measuring SKB alloc/free overhead
+ pg_set $dev "burst $BURST"
+done
+
+# start_run
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
+echo "Done" >&2
+
+# Print results
+for ((thread = 0; thread < $THREADS; thread++)); do
+ dev=${DEV}@${thread}
+ echo "Device: $dev"
+ grep -A2 "Result:" /proc/net/pktgen/$dev
+done
diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh
new file mode 100755
index 000000000000..8c9d318c221b
--- /dev/null
+++ b/samples/pktgen/pktgen_sample01_simple.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+#
+# Simple example:
+# * pktgen sending with single thread and single interface
+# * flow variation via random UDP source port
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+# - go look in parameters.sh to see which settings are available
+# - required param is the interface "-i" stored in $DEV
+source ${basedir}/parameters.sh
+#
+# Set some default params, if they didn't get set
+[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+# Example of enforcing the param "-m" for dst_mac
+[ -z "$DST_MAC" ] && usage && err 2 "Must specify -m dst_mac"
+
+# Base Config
+DELAY="0" # Zero means max speed
+COUNT="100000" # Zero means indefinitely
+
+# Flow variation: random source port between min and max
+UDP_MIN=9
+UDP_MAX=109
+
+# General cleanup of everything since the last run
+# (especially important if other threads were configured by other scripts)
+pg_ctrl "reset"
+
+# Remove all other devices and add_device $DEV to thread 0
+thread=0
+pg_thread $thread "rem_device_all"
+pg_thread $thread "add_device" $DEV
+
+# How many packets to send (zero means indefinitely)
+pg_set $DEV "count $COUNT"
+
+# Reduce alloc cost by sending same SKB many times
+# - this obviously affects the randomness within the packet
+pg_set $DEV "clone_skb $CLONE_SKB"
+
+# Set packet size
+pg_set $DEV "pkt_size $PKT_SIZE"
+
+# Delay between packets (zero means max speed)
+pg_set $DEV "delay $DELAY"
+
+# Flag example disabling timestamping
+pg_set $DEV "flag NO_TIMESTAMP"
+
+# Destination
+pg_set $DEV "dst_mac $DST_MAC"
+pg_set $DEV "dst $DEST_IP"
+
+# Setup random UDP port src range
+pg_set $DEV "flag UDPSRC_RND"
+pg_set $DEV "udp_src_min $UDP_MIN"
+pg_set $DEV "udp_src_max $UDP_MAX"
+
+# start_run
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
+echo "Done" >&2
+
+# Print results
+echo "Result device: $DEV"
+cat /proc/net/pktgen/$DEV
diff --git a/samples/pktgen/pktgen_sample02_multiqueue.sh b/samples/pktgen/pktgen_sample02_multiqueue.sh
new file mode 100755
index 000000000000..32467aea8e47
--- /dev/null
+++ b/samples/pktgen/pktgen_sample02_multiqueue.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+#
+# Multiqueue: Using pktgen threads for sending on multiple CPUs
+# * adding devices to kernel threads
+# * notice the naming scheme for keeping device names unique
+# * naming scheme: dev@thread_number
+# * flow variation via random UDP source port
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+#
+# Required param: -i dev in $DEV
+source ${basedir}/parameters.sh
+
+# Base Config
+DELAY="0" # Zero means max speed
+COUNT="100000" # Zero means indefinitely
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+
+# Flow variation: random source port between min and max
+UDP_MIN=9
+UDP_MAX=109
+
+# (example of setting default params in your script)
+[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+
+# General cleanup of everything since the last run
+pg_ctrl "reset"
+
+# The number of threads is specified with parameter -t, stored in $THREADS
+for ((thread = 0; thread < $THREADS; thread++)); do
+ # The device name is extended with @name, using the thread number
+ # to make them unique, but any name will do.
+ dev=${DEV}@${thread}
+
+ # Remove all other devices and add_device $dev to this thread
+ pg_thread $thread "rem_device_all"
+ pg_thread $thread "add_device" $dev
+
+ # Notice: config queue to map to cpu (mirrors smp_processor_id())
+ # It is beneficial to map IRQs (/proc/irq/*/smp_affinity) 1:1 to
+ # the CPU number
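+ # (a sketch: echo $cpu_mask > /proc/irq/$IRQ/smp_affinity, where $IRQ
+ #  is the queue's interrupt and $cpu_mask a hex CPU bitmask)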
+ pg_set $dev "flag QUEUE_MAP_CPU"
+
+ # Base config of dev
+ pg_set $dev "count $COUNT"
+ pg_set $dev "clone_skb $CLONE_SKB"
+ pg_set $dev "pkt_size $PKT_SIZE"
+ pg_set $dev "delay $DELAY"
+
+ # Flag example disabling timestamping
+ pg_set $dev "flag NO_TIMESTAMP"
+
+ # Destination
+ pg_set $dev "dst_mac $DST_MAC"
+ pg_set $dev "dst $DEST_IP"
+
+ # Setup random UDP port src range
+ pg_set $dev "flag UDPSRC_RND"
+ pg_set $dev "udp_src_min $UDP_MIN"
+ pg_set $dev "udp_src_max $UDP_MAX"
+done
+
+# start_run
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
+echo "Done" >&2
+
+# Print results
+for ((thread = 0; thread < $THREADS; thread++)); do
+ dev=${DEV}@${thread}
+ echo "Device: $dev"
+ grep -A2 "Result:" /proc/net/pktgen/$dev
+done
diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
new file mode 100755
index 000000000000..775f5d0a1e53
--- /dev/null
+++ b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+#
+# Script for max single flow performance
+# - If correctly tuned[1], single CPU 10G wirespeed small pkts is possible[2]
+#
+# Using pktgen "burst" option (use -b $N)
+# - To boost max performance
+# - Available since: kernel v3.18
+# * commit 38b2cf2982dc73 ("net: pktgen: packet bursting via skb->xmit_more")
+# - This avoids writing the HW tailptr on every driver xmit
+# - The performance boost is impressive, see commit and blog [2]
+#
+# Notice: on purpose this generates a single (UDP) flow towards the
+# target, as the point is to only overload/activate a single CPU on
+# the target host. Avoiding randomness also makes pktgen faster.
+#
+# Tuning see:
+# [1] http://netoptimizer.blogspot.dk/2014/06/pktgen-for-network-overload-testing.html
+# [2] http://netoptimizer.blogspot.dk/2014/10/unlocked-10gbps-tx-wirespeed-smallest.html
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+source ${basedir}/parameters.sh
+# Set some default params, if they didn't get set
+[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+[ -z "$BURST" ] && BURST=32
+[ -z "$CLONE_SKB" ] && CLONE_SKB="100000"
+
+# Base Config
+DELAY="0" # Zero means max speed
+COUNT="0" # Zero means indefinitely
+
+# General cleanup of everything since the last run
+pg_ctrl "reset"
+
+# The number of threads is specified with parameter -t, stored in $THREADS
+for ((thread = 0; thread < $THREADS; thread++)); do
+ dev=${DEV}@${thread}
+
+ # Remove all other devices and add_device $dev to this thread
+ pg_thread $thread "rem_device_all"
+ pg_thread $thread "add_device" $dev
+
+ # Base config
+ pg_set $dev "flag QUEUE_MAP_CPU"
+ pg_set $dev "count $COUNT"
+ pg_set $dev "clone_skb $CLONE_SKB"
+ pg_set $dev "pkt_size $PKT_SIZE"
+ pg_set $dev "delay $DELAY"
+ pg_set $dev "flag NO_TIMESTAMP"
+
+ # Destination
+ pg_set $dev "dst_mac $DST_MAC"
+ pg_set $dev "dst $DEST_IP"
+
+ # Setup burst; for easy testing, -b 0 disables bursting
+ # (internally in pktgen the default and minimum burst is 1)
+ if [[ ${BURST} -ne 0 ]]; then
+ pg_set $dev "burst $BURST"
+ else
+ info "$dev: Not using burst"
+ fi
+done
+
+# Print results if the user hits control-c
+function control_c() {
+ # Print results
+ for ((thread = 0; thread < $THREADS; thread++)); do
+ dev=${DEV}@${thread}
+ echo "Device: $dev"
+ grep -A2 "Result:" /proc/net/pktgen/$dev
+ done
+}
+# trap keyboard interrupt (Ctrl-C)
+trap control_c SIGINT
+
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 6f6733331d95..08c2a36ef7a9 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -272,7 +272,7 @@ int main(int argc, char **argv)
const int expect_hash[2][2] = { { 15, 5 }, { 20, 5 } };
const int expect_hash_rb[2][2] = { { 15, 5 }, { 20, 15 } };
const int expect_lb[2][2] = { { 10, 10 }, { 18, 17 } };
- const int expect_rb[2][2] = { { 20, 0 }, { 20, 15 } };
+ const int expect_rb[2][2] = { { 15, 5 }, { 20, 15 } };
const int expect_cpu0[2][2] = { { 20, 0 }, { 20, 0 } };
const int expect_cpu1[2][2] = { { 0, 20 }, { 0, 20 } };
int port_off = 2, tries = 5, ret;